Mirror of https://github.com/facebook/rocksdb.git (synced 2024-12-02 10:15:54 +00:00)

Commit 86a1e3e0e7
Summary: ... so that cache keys can be derived from DB manifest data before reading the file from storage--so that every part of the file can potentially go in a persistent cache. See updated comments in cache_key.cc for technical details. Importantly, the new cache key encoding uses some fancy but efficient math to pack data into the cache key without depending on the sizes of the various pieces. This simplifies some existing code creating cache keys, like cache warming before the file size is known.

This should provide us an essentially permanent mapping between SST unique IDs and base cache keys, with the ability to "upgrade" SST unique IDs (and thus cache keys) with new SST format_versions.

These cache keys are of similar, perhaps indistinguishable quality to the previous generation. Before this change (see "corrected" days between collision):

```
./cache_bench -stress_cache_key -sck_keep_bits=43
18 collisions after 2 x 90 days, est 10 days between (1.15292e+19 corrected)
```

After this change (keep 43 bits, up through 50, to validate "trajectory" is ok on "corrected" days between collision):

```
19 collisions after 3 x 90 days, est 14.2105 days between (1.63836e+19 corrected)
16 collisions after 5 x 90 days, est 28.125 days between (1.6213e+19 corrected)
15 collisions after 7 x 90 days, est 42 days between (1.21057e+19 corrected)
15 collisions after 17 x 90 days, est 102 days between (1.46997e+19 corrected)
15 collisions after 49 x 90 days, est 294 days between (2.11849e+19 corrected)
15 collisions after 62 x 90 days, est 372 days between (1.34027e+19 corrected)
15 collisions after 53 x 90 days, est 318 days between (5.72858e+18 corrected)
15 collisions after 309 x 90 days, est 1854 days between (1.66994e+19 corrected)
```

However, the change does modify (probably weaken) the "guaranteed unique" promise from this

> SST files generated in a single process are guaranteed to have unique cache keys, unless/until number session ids * max file number = 2**86

to this (see https://github.com/facebook/rocksdb/issues/10388)

> With the DB id limitation, we only have nice guaranteed unique cache keys for files generated in a single process until biggest session_id_counter and offset_in_file reach combined 64 bits

I don't think this is a practical concern, though.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10394

Test Plan: unit tests updated, see simulation results above

Reviewed By: jay-zhuang

Differential Revision: D38667529

Pulled By: pdillinger

fbshipit-source-id: 49af3fe7f47e5b61162809a78b76c769fd519fba
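To make the flow concrete, here is a minimal sketch of how a caller might use the API declared in the header below to derive per-block cache keys from file identity. It is illustrative only: the function name `ExampleBlockCacheKey`, the include path `cache/cache_key.h`, and the caller-provided `CacheKey` storage are assumptions for the example, not code from this change.

```cpp
#include <cstdint>
#include <string>

#include "cache/cache_key.h"  // assumed path of the header shown below

namespace ROCKSDB_NAMESPACE {

// Hypothetical helper: one OffsetableCacheKey ("base" key) is built per file
// from identifiers available in the manifest / table properties, and each
// block's key is derived from it with simple arithmetic.
Slice ExampleBlockCacheKey(const std::string &db_id,
                           const std::string &db_session_id,
                           uint64_t file_number, uint64_t block_offset,
                           CacheKey *key_storage /* must outlive the Slice */) {
  OffsetableCacheKey base(db_id, db_session_id, file_number);
  // WithOffset() is cheap (an XOR into the second 64 bits), so it can be
  // called on every block lookup.
  *key_storage = base.WithOffset(block_offset);
  // AsSlice() points into *key_storage, hence the caller-provided storage.
  return key_storage->AsSlice();
}

}  // namespace ROCKSDB_NAMESPACE
```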
144 lines · 5.7 KiB · C++
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <cstdint>

#include "rocksdb/rocksdb_namespace.h"
#include "rocksdb/slice.h"
#include "table/unique_id_impl.h"

namespace ROCKSDB_NAMESPACE {

class Cache;

// A standard holder for fixed-size block cache keys (and for related caches).
// They are created through one of these, each using its own range of values:
// * CacheKey::CreateUniqueForCacheLifetime
// * CacheKey::CreateUniqueForProcessLifetime
// * Default ctor ("empty" cache key)
// * OffsetableCacheKey->WithOffset
//
// The first two use atomic counters to guarantee uniqueness over the given
// lifetime and the last uses a form of universally unique identifier for
// uniqueness with very high probability (and guaranteed for files generated
// during a single process lifetime).
//
// CacheKeys are currently used by calling AsSlice() to pass as a key to
// Cache. For performance, the keys are endianness-dependent (though otherwise
// portable). (Persistable cache entries are not intended to cross platforms.)
class CacheKey {
 public:
  // For convenience, constructs an "empty" cache key that is never returned
  // by other means.
  inline CacheKey() : file_num_etc64_(), offset_etc64_() {}

  inline bool IsEmpty() const {
    return (file_num_etc64_ == 0) & (offset_etc64_ == 0);
  }

  // Use this cache key as a Slice (byte order is endianness-dependent)
  inline Slice AsSlice() const {
    static_assert(sizeof(*this) == 16, "Standardized on 16-byte cache key");
    assert(!IsEmpty());
    return Slice(reinterpret_cast<const char *>(this), sizeof(*this));
  }

  // Create a CacheKey that is unique among others associated with this Cache
  // instance. Depends on Cache::NewId. This is useful for block cache
  // "reservations".
  static CacheKey CreateUniqueForCacheLifetime(Cache *cache);

  // Create a CacheKey that is unique among others for the lifetime of this
  // process. This is useful for saving in a static data member so that
  // different DB instances can agree on a cache key for shared entities,
  // such as for CacheEntryStatsCollector.
  static CacheKey CreateUniqueForProcessLifetime();

 protected:
  friend class OffsetableCacheKey;
  CacheKey(uint64_t file_num_etc64, uint64_t offset_etc64)
      : file_num_etc64_(file_num_etc64), offset_etc64_(offset_etc64) {}
  uint64_t file_num_etc64_;
  uint64_t offset_etc64_;
};

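// Illustrative sketch (not upstream code; assumes a std::shared_ptr<Cache>
// named `cache`): a process-lifetime key can be kept in a static so that
// unrelated DB instances agree on the key for a shared cache entry, and the
// key is passed to Cache as a Slice:
//
//   static const CacheKey kSharedKey =
//       CacheKey::CreateUniqueForProcessLifetime();
//   Cache::Handle *handle = cache->Lookup(kSharedKey.AsSlice());
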
constexpr uint8_t kCacheKeySize = static_cast<uint8_t>(sizeof(CacheKey));

// A file-specific generator of cache keys, sometimes referred to as the
// "base" cache key for a file because all the cache keys for various offsets
// within the file are computed using simple arithmetic. The basis for the
// general approach is discussed here: https://github.com/pdillinger/unique_id
// Heavily related to GetUniqueIdFromTableProperties.
//
// If the db_id, db_session_id, and file_number come from the file's table
// properties, then the keys will be stable across DB::Open/Close, backup/
// restore, import/export, etc.
//
// This class "is a" CacheKey only privately so that it is not misused as
// a ready-to-use CacheKey.
class OffsetableCacheKey : private CacheKey {
 public:
  // For convenience, constructs an "empty" cache key that should not be used.
  inline OffsetableCacheKey() : CacheKey() {}

  // Constructs an OffsetableCacheKey with the given information about a file.
  // This constructor never generates an "empty" base key.
  OffsetableCacheKey(const std::string &db_id, const std::string &db_session_id,
                     uint64_t file_number);

  // Creates an OffsetableCacheKey from an SST unique ID, so that cache keys
  // can be derived from DB manifest data before reading the file from
  // storage--so that every part of the file can potentially go in a persistent
  // cache.
  //
  // Calling GetSstInternalUniqueId() on a db_id, db_session_id, and
  // file_number and passing the result to this function produces the same
  // base cache key as feeding those inputs directly to the constructor.
  //
  // This is a bijective transformation assuming either id is empty or
  // lower 64 bits is non-zero:
  // * Empty (all zeros) input -> empty (all zeros) output
  // * Lower 64 input is non-zero -> lower 64 output (file_num_etc64_) is
  //   non-zero
  static OffsetableCacheKey FromInternalUniqueId(UniqueIdPtr id);

  // This is the inverse transformation to the above, assuming either empty
  // or lower 64 bits (file_num_etc64_) is non-zero. Perhaps only useful for
  // testing.
  UniqueId64x2 ToInternalUniqueId();

  inline bool IsEmpty() const {
    bool result = file_num_etc64_ == 0;
    assert(!(offset_etc64_ > 0 && result));
    return result;
  }

  // Construct a CacheKey for an offset within a file. An offset is not
  // necessarily a byte offset if a smaller unique identifier of keyable
  // offsets is used.
  //
  // This class was designed to make this hot code extremely fast.
  inline CacheKey WithOffset(uint64_t offset) const {
    assert(!IsEmpty());
    return CacheKey(file_num_etc64_, offset_etc64_ ^ offset);
  }

  // The "common prefix" is a shared prefix for all the returned CacheKeys.
  // It is specific to the file but the same for all offsets within the file.
  static constexpr size_t kCommonPrefixSize = 8;
  inline Slice CommonPrefixSlice() const {
    static_assert(sizeof(file_num_etc64_) == kCommonPrefixSize,
                  "8 byte common prefix expected");
    assert(!IsEmpty());
    assert(&this->file_num_etc64_ == static_cast<const void *>(this));

    return Slice(reinterpret_cast<const char *>(this), kCommonPrefixSize);
  }
};

} // namespace ROCKSDB_NAMESPACE
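
As a usage note on the bijection described in the comments above, here is a small hedged sketch (not from the change itself) of the round trip between an internal unique ID and a base cache key. It assumes UniqueId64x2 is std::array<uint64_t, 2>, that UniqueIdPtr converts from a UniqueId64x2 pointer as declared in table/unique_id_impl.h, and that the header above lives at cache/cache_key.h; the literal ID values are arbitrary placeholders.

```cpp
#include <cassert>

#include "cache/cache_key.h"       // assumed path of the header above
#include "table/unique_id_impl.h"  // UniqueId64x2, UniqueIdPtr

namespace ROCKSDB_NAMESPACE {

void ExampleUniqueIdRoundTrip() {
  // Any ID with non-zero lower 64 bits is in the bijective domain; in
  // practice it would come from GetSstInternalUniqueId() as noted above.
  UniqueId64x2 id = {{0x1234567890abcdefULL, 0xfedcba0987654321ULL}};

  OffsetableCacheKey base = OffsetableCacheKey::FromInternalUniqueId(&id);
  assert(!base.IsEmpty());

  // The inverse transformation recovers the original ID (useful for testing).
  UniqueId64x2 round_tripped = base.ToInternalUniqueId();
  assert(round_tripped == id);
  (void)round_tripped;
}

}  // namespace ROCKSDB_NAMESPACE
```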