rocksdb (rocksdb v1.9.0)
Types
-type access_hint() :: normal | sequential | willneed | none.
-opaque backup_engine()
-type backup_info() :: #{id := non_neg_integer(), timestamp := non_neg_integer(), size := non_neg_integer(), number_files := non_neg_integer()}.
-opaque batch_handle()
-type block_based_table_options() :: [{no_block_cache, boolean()} | {block_size, pos_integer()} | {block_cache, cache_handle()} | {block_cache_size, pos_integer()} | {bloom_filter_policy, BitsPerKey :: pos_integer()} | {format_version, 0 | 1 | 2 | 3 | 4 | 5} | {cache_index_and_filter_blocks, boolean()}].
-opaque cache_handle()
-type cache_type() :: lru | clock.
-type cf_descriptor() :: {string(), cf_options()}.
-opaque cf_handle()
-type cf_options() :: [{block_cache_size_mb_for_point_lookup, non_neg_integer()} | {memtable_memory_budget, pos_integer()} | {write_buffer_size, pos_integer()} | {max_write_buffer_number, pos_integer()} | {min_write_buffer_number_to_merge, pos_integer()} | {enable_blob_files, boolean()} | {min_blob_size, non_neg_integer()} | {blob_file_size, non_neg_integer()} | {blob_compression_type, compression_type()} | {enable_blob_garbage_collection, boolean()} | {blob_garbage_collection_age_cutoff, float()} | {blob_garbage_collection_force_threshold, float()} | {blob_compaction_readahead_size, non_neg_integer()} | {blob_file_starting_level, non_neg_integer()} | {blob_cache, cache_handle()} | {prepopulate_blob_cache, prepopulate_blob_cache()} | {compression, compression_type()} | {bottommost_compression, compression_type()} | {compression_opts, compression_opts()} | {bottommost_compression_opts, compression_opts()} | {num_levels, pos_integer()} | {level0_file_num_compaction_trigger, integer()} | {level0_slowdown_writes_trigger, integer()} | {level0_stop_writes_trigger, integer()} | {target_file_size_base, pos_integer()} | {target_file_size_multiplier, pos_integer()} | {max_bytes_for_level_base, pos_integer()} | {max_bytes_for_level_multiplier, pos_integer()} | {max_compaction_bytes, pos_integer()} | {arena_block_size, integer()} | {disable_auto_compactions, boolean()} | {compaction_style, compaction_style()} | {compaction_pri, compaction_pri()} | {filter_deletes, boolean()} | {max_sequential_skip_in_iterations, pos_integer()} | {inplace_update_support, boolean()} | {inplace_update_num_locks, pos_integer()} | {table_factory_block_cache_size, pos_integer()} | {in_memory_mode, boolean()} | {block_based_table_options, block_based_table_options()} | {level_compaction_dynamic_level_bytes, boolean()} | {optimize_filters_for_hits, boolean()} | {prefix_extractor, {fixed_prefix_transform, integer()} | {capped_prefix_transform, integer()}} | {merge_operator, merge_operator()}].
-type column_family() :: cf_handle() | default_column_family.
-type compact_range_options() :: [{exclusive_manual_compaction, boolean()} | {change_level, boolean()} | {target_level, integer()} | {allow_write_stall, boolean()} | {max_subcompactions, non_neg_integer()}].
-type compaction_pri() :: compensated_size | oldest_largest_seq_first | oldest_smallest_seq_first.
-type compaction_style() :: level | universal | fifo | none.
-type compression_opts() :: [{enabled, boolean()} | {window_bits, pos_integer()} | {level, non_neg_integer()} | {strategy, integer()} | {max_dict_bytes, non_neg_integer()} | {zstd_max_train_bytes, non_neg_integer()}].
-type compression_type() :: snappy | zlib | bzip2 | lz4 | lz4h | zstd | none.
-opaque db_handle()
-type db_options() :: [{env, env()} | {total_threads, pos_integer()} | {create_if_missing, boolean()} | {create_missing_column_families, boolean()} | {error_if_exists, boolean()} | {paranoid_checks, boolean()} | {max_open_files, integer()} | {max_total_wal_size, non_neg_integer()} | {use_fsync, boolean()} | {db_paths, [#db_path{path :: file:filename_all(), target_size :: non_neg_integer()}]} | {db_log_dir, file:filename_all()} | {wal_dir, file:filename_all()} | {delete_obsolete_files_period_micros, pos_integer()} | {max_background_jobs, pos_integer()} | {max_background_compactions, pos_integer()} | {max_background_flushes, pos_integer()} | {max_log_file_size, non_neg_integer()} | {log_file_time_to_roll, non_neg_integer()} | {keep_log_file_num, pos_integer()} | {max_manifest_file_size, pos_integer()} | {table_cache_numshardbits, pos_integer()} | {wal_ttl_seconds, non_neg_integer()} | {manual_wal_flush, boolean()} | {wal_size_limit_mb, non_neg_integer()} | {manifest_preallocation_size, pos_integer()} | {allow_mmap_reads, boolean()} | {allow_mmap_writes, boolean()} | {is_fd_close_on_exec, boolean()} | {stats_dump_period_sec, non_neg_integer()} | {advise_random_on_open, boolean()} | {access_hint, access_hint()} | {compaction_readahead_size, non_neg_integer()} | {use_adaptive_mutex, boolean()} | {bytes_per_sync, non_neg_integer()} | {skip_stats_update_on_db_open, boolean()} | {wal_recovery_mode, wal_recovery_mode()} | {allow_concurrent_memtable_write, boolean()} | {enable_write_thread_adaptive_yield, boolean()} | {db_write_buffer_size, non_neg_integer()} | {in_memory, boolean()} | {rate_limiter, rate_limiter_handle()} | {sst_file_manager, sst_file_manager()} | {write_buffer_manager, write_buffer_manager()} | {max_subcompactions, non_neg_integer()} | {atomic_flush, boolean()} | {use_direct_reads, boolean()} | {use_direct_io_for_flush_and_compaction, boolean()} | {enable_pipelined_write, boolean()} | {unordered_write, boolean()} | {two_write_queues, boolean()} | {statistics, statistics_handle()}].
-opaque env()
-opaque env_handle()
-type env_priority() :: priority_high | priority_low.
-type env_type() :: default | memenv.
-opaque itr_handle()
-type merge_operator() :: erlang_merge_operator | bitset_merge_operator | {bitset_merge_operator, non_neg_integer()} | counter_merge_operator.
-type options() :: db_options() | cf_options().
-type prepopulate_blob_cache() :: disable | flush_only.
-opaque rate_limiter_handle()
-type read_options() :: [{read_tier, read_tier()} | {verify_checksums, boolean()} | {fill_cache, boolean()} | {iterate_upper_bound, binary()} | {iterate_lower_bound, binary()} | {tailing, boolean()} | {total_order_seek, boolean()} | {prefix_same_as_start, boolean()} | {snapshot, snapshot_handle()}].
-type read_tier() :: read_all_tier | block_cache_tier | persisted_tier | memtable_tier.
-type size_approximation_flag() :: none | include_memtables | include_files | include_both.
-opaque snapshot_handle()
-opaque sst_file_manager()
-opaque statistics_handle()
-type stats_level() ::
stats_disable_all | stats_except_tickers | stats_except_histogram_or_timers |
stats_except_timers | stats_except_detailed_timers | stats_except_time_for_mutex | stats_all.
-opaque transaction_handle()
-type wal_recovery_mode() ::
tolerate_corrupted_tail_records | absolute_consistency | point_in_time_recovery |
skip_any_corrupted_records.
-type write_actions() :: [{put, Key :: binary(), Value :: binary()} | {put, ColumnFamilyHandle :: cf_handle(), Key :: binary(), Value :: binary()} | {delete, Key :: binary()} | {delete, ColumnFamilyHandle :: cf_handle(), Key :: binary()} | {single_delete, Key :: binary()} | {single_delete, ColumnFamilyHandle :: cf_handle(), Key :: binary()} | clear].
-opaque write_buffer_manager()
Functions
-spec batch() -> {ok, Batch :: batch_handle()}.
-spec batch_clear(Batch :: batch_handle()) -> ok.
-spec batch_count(_Batch :: batch_handle()) -> Count :: non_neg_integer().
-spec batch_data_size(_Batch :: batch_handle()) -> BatchSize :: non_neg_integer().
-spec batch_delete(Batch :: batch_handle(), Key :: binary()) -> ok.
-spec batch_delete(Batch :: batch_handle(), ColumnFamily :: cf_handle(), Key :: binary()) -> ok.
Like batch_delete/2 but applies the operation to a column family.
-spec batch_merge(Batch :: batch_handle(), Key :: binary(), Value :: binary()) -> ok.
-spec batch_merge(Batch :: batch_handle(), ColumnFamily :: cf_handle(), Key :: binary(), Value :: binary()) -> ok.
Like batch_merge/3 but applies the operation to a column family.
-spec batch_put(Batch :: batch_handle(), Key :: binary(), Value :: binary()) -> ok.
-spec batch_put(Batch :: batch_handle(), ColumnFamily :: cf_handle(), Key :: binary(), Value :: binary()) -> ok.
Like batch_put/3 but applies the operation to a column family.
-spec batch_rollback(Batch :: batch_handle()) -> ok.
-spec batch_savepoint(Batch :: batch_handle()) -> ok.
-spec batch_single_delete(Batch :: batch_handle(), Key :: binary()) -> ok.
-spec batch_single_delete(Batch :: batch_handle(), ColumnFamily :: cf_handle(), Key :: binary()) -> ok.
Like batch_single_delete/2 but applies the operation to a column family.
-spec batch_tolist(Batch :: batch_handle()) -> Ops :: write_actions().
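A minimal usage sketch (the database path is illustrative): create a batch, queue operations, apply it atomically with write_batch/3, then release it.

{ok, Db} = rocksdb:open("/tmp/rocksdb_batch_example", [{create_if_missing, true}]),
{ok, Batch} = rocksdb:batch(),
ok = rocksdb:batch_put(Batch, <<"k1">>, <<"v1">>),
ok = rocksdb:batch_put(Batch, <<"k2">>, <<"v2">>),
ok = rocksdb:batch_delete(Batch, <<"k1">>),
3 = rocksdb:batch_count(Batch),              %% three operations are queued
Ops = rocksdb:batch_tolist(Batch),           %% e.g. [{put, <<"k1">>, <<"v1">>}, ...]
ok = rocksdb:write_batch(Db, Batch, []),
ok = rocksdb:release_batch(Batch).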
-spec cache_info(Cache) -> InfoList when Cache :: cache_handle(), InfoList :: [InfoTuple], InfoTuple :: {capacity, non_neg_integer()} | {strict_capacity, boolean()} | {usage, non_neg_integer()} | {pinned_usage, non_neg_integer()}.
* {capacity, integer >= 0}: the maximum configured capacity of the cache.
* {strict_capacity, boolean}: whether an error is returned on insertion when the cache reaches its full capacity.
* {usage, integer >= 0}: the memory size of the entries residing in the cache.
* {pinned_usage, integer >= 0}: the memory size of the entries in use by the system.
-spec cache_info(Cache, Item) -> Value when Cache :: cache_handle(), Item :: capacity | strict_capacity | usage | pinned_usage, Value :: term().
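For illustration, a short sketch creating an LRU cache and querying it (the capacity value is arbitrary):

{ok, Cache} = rocksdb:new_cache(lru, 8 * 1024 * 1024),    %% 8 MB LRU cache
InfoList = rocksdb:cache_info(Cache),                     %% [{capacity, ...}, {strict_capacity, ...}, ...]
Capacity = rocksdb:cache_info(Cache, capacity).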
-spec checkpoint(DbHandle :: db_handle(), Path :: file:filename_all()) -> ok | {error, any()}.
-spec close_backup_engine(backup_engine()) -> ok.
-spec compact_range(DBHandle, BeginKey, EndKey, CompactRangeOpts) -> Res when DBHandle :: db_handle(), BeginKey :: binary() | undefined, EndKey :: binary() | undefined, CompactRangeOpts :: compact_range_options(), Res :: ok | {error, any()}.
Compact the underlying storage for the key range [BeginKey, EndKey]. The actual compaction interval might be a superset of [BeginKey, EndKey]. In particular, deleted and overwritten versions are discarded, and the data is rearranged to reduce the cost of operations needed to access the data. This operation should typically only be invoked by users who understand the underlying implementation.
"begin==undefined" is treated as a key before all keys in the database. "end==undefined" is treated as a key after all keys in the database. Therefore the following call will compact the entire database: rocksdb::compact_range(Options, undefined, undefined); Note that after the entire database is compacted, all data are pushed down to the last level containing any data. If the total data size after compaction is reduced, that level might not be appropriate for hosting all the files. In this case, client could set options.change_level to true, to move the files back to the minimum level capable of holding the data set or a given level (specified by non-negative target_level).-spec compact_range(DBHandle, CFHandle, BeginKey, EndKey, CompactRangeOpts) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), BeginKey :: binary() | undefined, EndKey :: binary() | undefined, CompactRangeOpts :: compact_range_options(), Res :: ok | {error, any()}.
Like compact_range/4 but for a column family.
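A minimal sketch, assuming Db is an open db_handle(); passing undefined for both bounds compacts the entire default column family:

%% compact everything in the default column family
ok = rocksdb:compact_range(Db, undefined, undefined, []),
%% compact only the key range ["a", "m"] and move the result to the lowest level that can hold it
ok = rocksdb:compact_range(Db, <<"a">>, <<"m">>, [{change_level, true}]).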
-spec count(DBHandle :: db_handle()) -> non_neg_integer() | {error, any()}.
Return the approximate number of keys in the default column family. Implemented by calling GetIntProperty with "rocksdb.estimate-num-keys"
This function is deprecated and will be removed in the next major release.
-spec count(DBHandle :: db_handle(), CFHandle :: cf_handle()) -> non_neg_integer() | {error, any()}.
Return the approximate number of keys in the specified column family.
This function is deprecated and will be removed in the next major release.
-spec create_column_family(DBHandle, Name, CFOpts) -> Res when DBHandle :: db_handle(), Name :: string(), CFOpts :: cf_options(), Res :: {ok, cf_handle()} | {error, any()}.
-spec create_new_backup(BackupEngine :: backup_engine(), Db :: db_handle()) -> ok | {error, term()}.
-spec delete(DBHandle, Key, WriteOpts) -> ok | {error, any()} when DBHandle :: db_handle(), Key :: binary(), WriteOpts :: write_options().
-spec delete(DBHandle, CFHandle, Key, WriteOpts) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), Key :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
-spec delete_backup(BackupEngine :: backup_engine(), BackupId :: non_neg_integer()) -> ok | {error, any()}.
-spec delete_range(DBHandle, BeginKey, EndKey, WriteOpts) -> Res when DBHandle :: db_handle(), BeginKey :: binary(), EndKey :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
Removes the database entries in the range ["BeginKey", "EndKey"), i.e., including "BeginKey" and excluding "EndKey". Returns OK on success, and a non-OK status on error. It is not an error if no keys exist in the range ["BeginKey", "EndKey").
This feature is currently an experimental performance optimization for deleting very large ranges of contiguous keys. Invoking it many times or on small ranges may severely degrade read performance; in particular, the resulting performance can be worse than calling Delete() for each key in the range. Note also the degraded read performance affects keys outside the deleted ranges, and affects database operations involving scans, like flush and compaction.
Consider setting ReadOptions::ignore_range_deletions = true to speed up reads for keys that are known to be unaffected by range deletions.
-spec delete_range(DBHandle, CFHandle, BeginKey, EndKey, WriteOpts) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), BeginKey :: binary(), EndKey :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
Like delete_range/4 but for a column family.
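A minimal sketch, assuming Db is an open db_handle(): remove every key in ["a", "m") from the default column family.

ok = rocksdb:delete_range(Db, <<"a">>, <<"m">>, []).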
-spec destroy(Name :: file:filename_all(), DBOpts :: db_options()) -> ok | {error, any()}.
-spec destroy_env(Env :: env_handle()) -> ok.
-spec flush(db_handle(), flush_options()) -> ok | {error, term()}.
-spec flush(db_handle(), column_family(), flush_options()) -> ok | {error, term()}.
-spec fold(DBHandle, Fun, AccIn, ReadOpts) -> AccOut when DBHandle :: db_handle(), Fun :: fold_fun(), AccIn :: any(), ReadOpts :: read_options(), AccOut :: any().
Calls Fun(Elem, AccIn) on successive elements in the default column family starting with AccIn == Acc0. Fun/2 must return a new accumulator which is passed to the next call. The function returns the final value of the accumulator. Acc0 is returned if the default column family is empty.
This function is deprecated and will be removed in the next major release. You should use the iterator API instead.
-spec fold(DBHandle, CFHandle, Fun, AccIn, ReadOpts) -> AccOut when DBHandle :: db_handle(), CFHandle :: cf_handle(), Fun :: fold_fun(), AccIn :: any(), ReadOpts :: read_options(), AccOut :: any().
Calls Fun(Elem, AccIn) on successive elements in the specified column family. Otherwise the same as fold/4.
This function is deprecated and will be removed in the next major release. You should use the iterator API instead.
-spec fold_keys(DBHandle, Fun, AccIn, ReadOpts) -> AccOut when DBHandle :: db_handle(), Fun :: fold_keys_fun(), AccIn :: any(), ReadOpts :: read_options(), AccOut :: any().
Calls Fun(Elem, AccIn) on successive elements in the default column family starting with AccIn == Acc0. Fun/2 must return a new accumulator which is passed to the next call. The function returns the final value of the accumulator. Acc0 is returned if the default column family is empty.
This function is deprecated and will be removed in the next major release. You should use the iterator API instead.
-spec fold_keys(DBHandle, CFHandle, Fun, AccIn, ReadOpts) -> AccOut when DBHandle :: db_handle(), CFHandle :: cf_handle(), Fun :: fold_keys_fun(), AccIn :: any(), ReadOpts :: read_options(), AccOut :: any().
Calls Fun(Elem, AccIn) on successive elements in the specified column family. Otherwise the same as fold_keys/4.
This function is deprecated and will be removed in the next major release. You should use the iterator API instead.
-spec gc_backup_engine(backup_engine()) -> ok.
-spec get_approximate_memtable_stats(DBHandle, StartKey, LimitKey) -> Res when DBHandle :: db_handle(), StartKey :: binary(), LimitKey :: binary(), Res :: {ok, {Count :: non_neg_integer(), Size :: non_neg_integer()}}.
-spec get_approximate_memtable_stats(DBHandle, CFHandle, StartKey, LimitKey) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), StartKey :: binary(), LimitKey :: binary(), Res :: {ok, {Count :: non_neg_integer(), Size :: non_neg_integer()}}.
-spec get_approximate_sizes(DBHandle, Ranges, IncludeFlags) -> Sizes when DBHandle :: db_handle(), Ranges :: [range()], IncludeFlags :: size_approximation_flag(), Sizes :: [non_neg_integer()].
For each i in [0, n-1], Sizes[i] contains the approximate file system space used by keys in [range[i].start .. range[i].limit).
Note that the returned sizes measure file system space usage, so if the user data compresses by a factor of ten, the returned sizes will be one-tenth the size of the corresponding user data size.
IncludeFlags defines whether the returned size should include the recently written data in the mem-tables (if the mem-table type supports it), data serialized to disk, or both.
-spec get_approximate_sizes(DBHandle, CFHandle, Ranges, IncludeFlags) -> Sizes when DBHandle :: db_handle(), CFHandle :: cf_handle(), Ranges :: [range()], IncludeFlags :: size_approximation_flag(), Sizes :: [non_neg_integer()].
-spec get_backup_info(backup_engine()) -> [backup_info()].
-spec get_latest_sequence_number(Db :: db_handle()) -> Seq :: non_neg_integer().
-spec get_snapshot_sequence(SnapshotHandle :: snapshot_handle()) -> Sequence :: non_neg_integer().
-spec is_empty(DBHandle :: db_handle()) -> true | false.
-spec iterator(DBHandle, ReadOpts) -> Res when DBHandle :: db_handle(), ReadOpts :: read_options(), Res :: {ok, itr_handle()} | {error, any()}.
-spec iterator(DBHandle, CFHandle, ReadOpts) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), ReadOpts :: read_options(), Res :: {ok, itr_handle()} | {error, any()}.
-spec iterator_close(ITRHandle) -> ok | {error, _} when ITRHandle :: itr_handle().
-spec iterator_move(ITRHandle, ITRAction) -> {ok, Key :: binary(), Value :: binary()} | {ok, Key :: binary()} | {error, invalid_iterator} | {error, iterator_closed} when ITRHandle :: itr_handle(), ITRAction :: iterator_action().
-spec iterator_refresh(ITRHandle) -> ok when ITRHandle :: itr_handle().
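A minimal sketch of scanning the default column family, assuming Db is an open db_handle():

{ok, Itr} = rocksdb:iterator(Db, []),
{ok, FirstKey, FirstValue} = rocksdb:iterator_move(Itr, first),
Next = rocksdb:iterator_move(Itr, next),   %% {ok, K, V}, or {error, invalid_iterator} past the end
ok = rocksdb:iterator_close(Itr).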
-spec iterators(DBHandle, CFHandle, ReadOpts) -> {ok, itr_handle()} | {error, any()} when DBHandle :: db_handle(), CFHandle :: cf_handle(), ReadOpts :: read_options().
-spec list_column_families(Name, DBOpts) -> Res when Name :: file:filename_all(), DBOpts :: db_options(), Res :: {ok, [string()]} | {error, any()}.
-spec merge(DBHandle, Key, Value, WriteOpts) -> Res when DBHandle :: db_handle(), Key :: binary(), Value :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
-spec new_cache(Type :: cache_type(), Capacity :: non_neg_integer()) -> {ok, cache_handle()}.
-spec new_env() -> {ok, env_handle()}.
-spec new_env(EnvType :: env_type()) -> {ok, env_handle()}.
-spec new_sst_file_manager(env_handle()) -> {ok, sst_file_manager()} | {error, any()}.
-spec new_sst_file_manager(Env, OptionsList) -> Result when Env :: env_handle(), OptionsList :: [OptionTuple], OptionTuple :: {delete_rate_bytes_per_sec, non_neg_integer()} | {max_trash_db_ratio, float()} | {bytes_max_delete_chunk, non_neg_integer()}, Result :: {ok, sst_file_manager()} | {error, any()}.
Create a new SstFileManager that can be shared among multiple RocksDB instances to track SST files and control their deletion rate.
* Env is an environment resource created using rocksdb:new_env/{0,1}.
* delete_rate_bytes_per_sec: how many bytes should be deleted per second. If this value is set to 1024 (1 KB/sec) and a file of size 4 KB was deleted in 1 second, the manager will wait for another 3 seconds before deleting other files. Set to 0 to disable deletion rate limiting.
* max_trash_db_ratio: if the trash size constitutes more than this fraction of the total DB size, new files passed to the DeleteScheduler will be deleted immediately.
* bytes_max_delete_chunk: if a file to delete is larger than the delete chunk, ftruncate the file by this size each time rather than dropping the whole file. 0 means always delete the whole file. If the file has more than one linked name, the file will be deleted as a whole. Either way, delete_rate_bytes_per_sec is still applied. Note that with this option, files already renamed as trash may be partial, so users should not directly recover them without checking.
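A minimal sketch (the path and rate are illustrative): create a manager that limits deletions to about 1 MB/s and share it with a database via the sst_file_manager option.

{ok, Env} = rocksdb:new_env(),
{ok, Mgr} = rocksdb:new_sst_file_manager(Env, [{delete_rate_bytes_per_sec, 1024 * 1024}]),
{ok, Db} = rocksdb:open("/tmp/rocksdb_sst_mgr_example",
                        [{create_if_missing, true}, {sst_file_manager, Mgr}]),
TotalSize = rocksdb:sst_file_manager_info(Mgr, total_size).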
-spec new_statistics() -> {ok, statistics_handle()}.
-spec new_write_buffer_manager(BufferSize :: non_neg_integer()) -> {ok, write_buffer_manager()}.
-spec new_write_buffer_manager(BufferSize :: non_neg_integer(), Cache :: cache_handle()) -> {ok, write_buffer_manager()}.
Create a new WriteBufferManager. A WriteBufferManager manages memory allocation for one or more MemTables.
The memory usage of each memtable is reported to this object. The same object can be passed into multiple DBs, and it will track the sum of the sizes of all the DBs. If the total size of all live memtables of all the DBs exceeds a limit, a flush will be triggered in the next DB to which the next write is issued.
If the object is only passed to one DB, the behavior is the same as db_write_buffer_size. When write_buffer_manager is set, the value set will override db_write_buffer_size.
-spec open(Name, DBOpts) -> Result when Name :: file:filename_all(), DBOpts :: options(), Result :: {ok, db_handle()} | {error, any()}.
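For illustration, a sketch (buffer size and path are arbitrary) that creates a write buffer manager and shares it with a database through the write_buffer_manager option:

{ok, Wbm} = rocksdb:new_write_buffer_manager(64 * 1024 * 1024),   %% 64 MB budget for all memtables
{ok, Db} = rocksdb:open("/tmp/rocksdb_wbm_example",
                        [{create_if_missing, true}, {write_buffer_manager, Wbm}]),
Info = rocksdb:write_buffer_manager_info(Wbm).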
-spec open(Name, DBOpts, CFDescriptors) -> {ok, db_handle(), [cf_handle()]} | {error, any()} when Name :: file:filename_all(), DBOpts :: db_options(), CFDescriptors :: [cf_descriptor()].
-spec open_backup_engine(Path :: string()) -> {ok, backup_engine()} | {error, term()}.
-spec open_readonly(Name, DBOpts) -> Result when Name :: file:filename_all(), DBOpts :: options(), Result :: {ok, db_handle()} | {error, any()}.
-spec open_readonly(Name, DBOpts, CFDescriptors) -> {ok, db_handle(), [cf_handle()]} | {error, any()} when Name :: file:filename_all(), DBOpts :: db_options(), CFDescriptors :: [cf_descriptor()].
-spec open_with_ttl(Name, DBOpts, TTL, ReadOnly) -> {ok, db_handle()} | {error, any()} when Name :: file:filename_all(), DBOpts :: db_options(), TTL :: integer(), ReadOnly :: boolean().
Open RocksDB with TTL support. This API should be used to open the db when key-values inserted are meant to be removed from the db in a non-strict TTL amount of time. Therefore, this guarantees that key-values inserted will remain in the db for >= TTL amount of time, and the db will make efforts to remove the key-values as soon as possible after ttl seconds of their insertion.
Expired TTL values (Timestamp + ttl < time_now) are only deleted during compaction, so Get/Iterator may return expired entries (compaction has not run on them yet). A different TTL may be used during different opens. Example: Open1 at t=0 with TTL=4 and insert k1, k2, close at t=2; Open2 at t=3 with TTL=5; now k1, k2 should be deleted at t >= 5. ReadOnly=true opens in the usual read-only mode; compactions will not be triggered (neither manual nor automatic), so no expired entries are removed.
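A minimal sketch following the example above (the path is illustrative): open with a 4-second TTL and write a key; the entry becomes eligible for removal at the next compaction once it is older than the TTL.

{ok, Db} = rocksdb:open_with_ttl("/tmp/rocksdb_ttl_example",
                                 [{create_if_missing, true}], 4, false),
ok = rocksdb:put(Db, <<"k1">>, <<"v1">>, []).
%% Reads may still return <<"k1">> until compaction has processed the expired entry.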
-spec purge_old_backup(BackupEngine :: backup_engine(), NumBackupToKeep :: non_neg_integer()) -> ok | {error, any()}.
-spec put(DBHandle, Key, Value, WriteOpts) -> Res when DBHandle :: db_handle(), Key :: binary(), Value :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
-spec release_batch(Batch :: batch_handle()) -> ok.
-spec release_snapshot(SnapshotHandle :: snapshot_handle()) -> ok | {error, any()}.
-spec release_sst_file_manager(sst_file_manager()) -> ok.
-spec release_statistics(statistics_handle()) -> ok.
-spec release_transaction(TransactionHandle :: transaction_handle()) -> ok.
-spec release_write_buffer_manager(write_buffer_manager()) -> ok.
-spec repair(Name :: file:filename_all(), DBOpts :: db_options()) -> ok | {error, any()}.
-spec restore_db_from_backup(BackupEngine, BackupId, DbDir) -> Result when BackupEngine :: backup_engine(), BackupId :: non_neg_integer(), DbDir :: string(), Result :: ok | {error, any()}.
-spec restore_db_from_backup(BackupEngine, BackupId, DbDir, WalDir) -> Result when BackupEngine :: backup_engine(), BackupId :: non_neg_integer(), DbDir :: string(), WalDir :: string(), Result :: ok | {error, any()}.
-spec restore_db_from_latest_backup(BackupEngine, DbDir) -> Result when BackupEngine :: backup_engine(), DbDir :: string(), Result :: ok | {error, any()}.
-spec restore_db_from_latest_backup(BackupEngine, DbDir, WalDir) -> Result when BackupEngine :: backup_engine(), DbDir :: string(), WalDir :: string(), Result :: ok | {error, any()}.
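A minimal sketch of the backup workflow (directories are illustrative), assuming Db is an open db_handle():

{ok, Engine} = rocksdb:open_backup_engine("/tmp/rocksdb_backups"),
ok = rocksdb:create_new_backup(Engine, Db),
[#{id := Id} | _] = rocksdb:get_backup_info(Engine),
ok = rocksdb:verify_backup(Engine, Id),
ok = rocksdb:close_backup_engine(Engine).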
-spec set_capacity(Cache :: cache_handle(), Capacity :: non_neg_integer()) -> ok.
-spec set_db_background_threads(DB :: db_handle(), N :: non_neg_integer()) -> ok.
-spec set_db_background_threads(DB :: db_handle(), N :: non_neg_integer(), Priority :: env_priority()) -> ok.
-spec set_env_background_threads(Env :: env_handle(), N :: non_neg_integer()) -> ok.
-spec set_env_background_threads(Env :: env_handle(), N :: non_neg_integer(), Priority :: env_priority()) -> ok.
-spec set_stats_level(statistics_handle(), stats_level()) -> ok.
-spec set_strict_capacity_limit(Cache :: cache_handle(), StrictCapacityLimit :: boolean()) -> ok.
-spec single_delete(DBHandle, Key, WriteOpts) -> ok | {error, any()} when DBHandle :: db_handle(), Key :: binary(), WriteOpts :: write_options().
Remove the database entry for "key". Requires that the key exists and was not overwritten. Returns OK on success, and a non-OK status on error. It is not an error if "key" did not exist in the database.
If a key is overwritten (by calling Put() multiple times), then the result of calling SingleDelete() on this key is undefined. SingleDelete() only behaves correctly if there has been only one Put() for this key since the previous call to SingleDelete() for this key.
This feature is currently an experimental performance optimization for a very specific workload. It is up to the caller to ensure that SingleDelete is only used for a key that is not deleted using Delete() or written using Merge(). Mixing SingleDelete operations with Deletes can result in undefined behavior.
Note: consider setting the write option {sync, true}.
-spec single_delete(DBHandle, CFHandle, Key, WriteOpts) -> Res when DBHandle :: db_handle(), CFHandle :: cf_handle(), Key :: binary(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
Like single_delete/3 but on the specified column family.
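A minimal sketch, assuming Db is an open db_handle() and the key has been written exactly once since the last single delete:

ok = rocksdb:put(Db, <<"once">>, <<"v">>, []),
ok = rocksdb:single_delete(Db, <<"once">>, [{sync, true}]).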
-spec snapshot(DbHandle :: db_handle()) -> {ok, snapshot_handle()} | {error, any()}.
-spec sst_file_manager_flag(SstFileManager, Flag, Value) -> Result when SstFileManager :: sst_file_manager(), Flag :: max_allowed_space_usage | compaction_buffer_size | delete_rate_bytes_per_sec | max_trash_db_ratio, Value :: non_neg_integer() | float(), Result :: ok.
Set certain flags for the SST file manager:
* max_allowed_space_usage: update the maximum allowed space that should be used by RocksDB. If the total size of the SST files exceeds MaxAllowedSpace, writes to RocksDB will fail.
* compaction_buffer_size: set the amount of buffer room each compaction should be able to leave. In other words, at its maximum disk space consumption, the compaction should still leave compaction_buffer_size available on the disk so that other background functions may continue, such as logging and flushing.
* delete_rate_bytes_per_sec: update the delete rate limit in bytes per second. Zero means disable delete rate limiting and delete files immediately.
* max_trash_db_ratio: update the trash/DB size ratio above which new files will be deleted immediately (float).
-spec sst_file_manager_info(SstFileManager) -> InfoList when SstFileManager :: sst_file_manager(), InfoList :: [InfoTuple], InfoTuple :: {total_size, non_neg_integer()} | {delete_rate_bytes_per_sec, non_neg_integer()} | {max_trash_db_ratio, float()} | {total_trash_size, non_neg_integer()} | {is_max_allowed_space_reached, boolean()} | {max_allowed_space_reached_including_compactions, boolean()}.
Return information about an SST file manager as a list of tuples:
* {total_size, Int >= 0}: total size of all tracked files.
* {delete_rate_bytes_per_sec, Int >= 0}: delete rate limit in bytes per second.
* {max_trash_db_ratio, Float >= 0}: trash/DB size ratio where new files will be deleted immediately.
* {total_trash_size, Int >= 0}: total size of trash files.
* {is_max_allowed_space_reached, Boolean}: true if the total size of SST files exceeded the maximum allowed space usage.
* {max_allowed_space_reached_including_compactions, Boolean}: true if the total size of SST files as well as the estimated size of ongoing compactions exceeds the maximum allowed space usage.
-spec sst_file_manager_info(SstFileManager, Item) -> Value when SstFileManager :: sst_file_manager(), Item :: total_size | delete_rate_bytes_per_sec | max_trash_db_ratio | total_trash_size | is_max_allowed_space_reached | max_allowed_space_reached_including_compactions, Value :: term().
-spec statistics_info(Statistics) -> InfoList when Statistics :: statistics_handle(), InfoList :: [InfoTuple], InfoTuple :: {stats_level, stats_level()}.
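A short sketch (the path is illustrative) wiring a statistics handle into a database via the statistics option:

{ok, Stats} = rocksdb:new_statistics(),
ok = rocksdb:set_stats_level(Stats, stats_except_detailed_timers),
{ok, Db} = rocksdb:open("/tmp/rocksdb_stats_example",
                        [{create_if_missing, true}, {statistics, Stats}]),
[{stats_level, _Level}] = rocksdb:statistics_info(Stats).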
-spec stop_backup(backup_engine()) -> ok.
-spec tlog_iterator(Db :: db_handle(), Since :: non_neg_integer()) -> {ok, Iterator :: term()}.
-spec tlog_iterator_close(term()) -> ok.
-spec tlog_next_binary_update(Iterator :: term()) -> {ok, LastSeq :: non_neg_integer(), BinLog :: binary()} | {error, term()}.
-spec tlog_next_update(Iterator :: term()) -> {ok, LastSeq :: non_neg_integer(), Log :: write_actions(), BinLog :: binary()} | {error, term()}.
Like tlog_next_binary_update/1 but also returns the batch as a list of operations.
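A hedged sketch of replaying the transaction log from sequence 0, assuming Db is an open db_handle() with some writes already applied:

{ok, Itr} = rocksdb:tlog_iterator(Db, 0),
Result = case rocksdb:tlog_next_update(Itr) of
             {ok, _LastSeq, Ops, _BinLog} -> Ops;   %% Ops is a write_actions() list
             {error, _} = Error -> Error
         end,
ok = rocksdb:tlog_iterator_close(Itr).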
-spec transaction(TransactionDB :: db_handle(), WriteOptions :: write_options()) -> {ok, transaction_handle()}.
-spec transaction_commit(Transaction :: transaction_handle()) -> ok | {error, term()}.
-spec transaction_delete(Transaction :: transaction_handle(), Key :: binary()) -> ok.
-spec transaction_delete(Transaction :: transaction_handle(), ColumnFamily :: cf_handle(), Key :: binary()) -> ok.
Like transaction_delete/2 but applies the operation to a column family.
-spec transaction_get(Transaction :: transaction_handle(), Key :: binary(), Opts :: read_options()) -> Res :: {ok, binary()} | not_found | {error, {corruption, string()}} | {error, any()}.
-spec transaction_get(Transaction :: transaction_handle(), ColumnFamily :: cf_handle(), Key :: binary(), Opts :: read_options()) -> Res :: {ok, binary()} | not_found | {error, {corruption, string()}} | {error, any()}.
Like transaction_get/3 but applies the operation to a column family.
-spec transaction_iterator(TransactionHandle, ReadOpts) -> Res when TransactionHandle :: transaction_handle(), ReadOpts :: read_options(), Res :: {ok, itr_handle()} | {error, any()}.
-spec transaction_iterator(TransactionHandle, CFHandle, ReadOpts) -> Res when TransactionHandle :: transaction_handle(), CFHandle :: cf_handle(), ReadOpts :: read_options(), Res :: {ok, itr_handle()} | {error, any()}.
-spec transaction_put(Transaction :: transaction_handle(), Key :: binary(), Value :: binary()) -> ok | {error, any()}.
-spec transaction_put(Transaction :: transaction_handle(), ColumnFamily :: cf_handle(), Key :: binary(), Value :: binary()) -> ok | {error, any()}.
Like transaction_put/3 but applies the operation to a column family.
-spec transaction_rollback(Transaction :: transaction_handle()) -> ok | {error, term()}.
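A hedged sketch of the transaction API; Db is assumed to be a database handle opened with transaction support (see the TransactionDB argument of transaction/2):

{ok, Txn} = rocksdb:transaction(Db, []),
ok = rocksdb:transaction_put(Txn, <<"k">>, <<"v">>),
{ok, <<"v">>} = rocksdb:transaction_get(Txn, <<"k">>, []),
ok = rocksdb:transaction_commit(Txn),
ok = rocksdb:release_transaction(Txn).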
-spec verify_backup(BackupEngine :: backup_engine(), BackupId :: non_neg_integer()) -> ok | {error, any()}.
-spec write(DBHandle, WriteActions, WriteOpts) -> Res when DBHandle :: db_handle(), WriteActions :: write_actions(), WriteOpts :: write_options(), Res :: ok | {error, any()}.
This function is deprecated; you should use the batch_* API instead.
-spec write_batch(Db :: db_handle(), Batch :: batch_handle(), WriteOptions :: write_options()) -> ok | {error, term()}.
-spec write_binary_update(DbHandle :: db_handle(), BinLog :: binary(), WriteOptions :: write_options()) -> ok | {error, term()}.
-spec write_buffer_manager_info(WriteBufferManager) -> InfoList when WriteBufferManager :: write_buffer_manager(), InfoList :: [InfoTuple], InfoTuple :: {memory_usage, non_neg_integer()} | {mutable_memtable_memory_usage, non_neg_integer()} | {buffer_size, non_neg_integer()} | {enabled, boolean()}.
-spec write_buffer_manager_info(WriteBufferManager, Item) -> Value when WriteBufferManager :: write_buffer_manager(), Item :: memory_usage | mutable_memtable_memory_usage | buffer_size | enabled, Value :: term().