kudu-commits mailing list archives

From ale...@apache.org
Subject [kudu] 03/03: [util] modernize signature of Cache interface methods
Date Tue, 07 May 2019 05:52:30 GMT
This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 5e3af4e2ae45ee5f700b9c6c28d56ff84ffeb319
Author: Alexey Serbin <alexey@apache.org>
AuthorDate: Sat Apr 27 01:16:11 2019 -0700

    [util] modernize signature of Cache interface methods
    
    Updated the signatures of the Lookup() and Insert() methods of the
    Cache interface to return UniqueHandle.  This helps automate reference
    counting for the cached items.
    
    Also, the Release() and Free() methods have been moved into the
    protected section.  With UniqueHandle returned from Lookup() and
    Insert(), Release() and Free() no longer need to be part of the
    Cache's public interface.
    
    Change-Id: I31518cdfa269e6806aa5dda180956896d8cb3404
    Reviewed-on: http://gerrit.cloudera.org:8080/13175
    Reviewed-by: Adar Dembo <adar@cloudera.com>
    Tested-by: Alexey Serbin <aserbin@cloudera.com>
---
 src/kudu/cfile/block_cache.cc    |  25 ++++----
 src/kudu/cfile/block_cache.h     |  51 +++++-----------
 src/kudu/cfile/block_handle.h    |   4 +-
 src/kudu/cfile/cfile-test.cc     |  13 ++--
 src/kudu/codegen/code_cache.cc   |  19 +++---
 src/kudu/util/cache-bench.cc     |  10 ++--
 src/kudu/util/cache-test.cc      |  20 +++----
 src/kudu/util/cache.cc           | 124 +++++++++++++++++++++------------------
 src/kudu/util/cache.h            | 123 ++++++++++++++++++++------------------
 src/kudu/util/file_cache-test.cc |  18 ++----
 src/kudu/util/file_cache.cc      |  15 +++--
 src/kudu/util/nvm_cache.cc       |  22 ++++---
 src/kudu/util/ttl_cache.h        |  11 ++--
 13 files changed, 219 insertions(+), 236 deletions(-)
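
A caller-side sketch of the reworked interface, for readers skimming the patch
below.  The helper name DemoLookupOrInsert and the key/value used here are
illustrative only; the Cache calls themselves (Lookup(), Allocate(),
MutableValue(), Insert(), Value()) follow the signatures introduced in
src/kudu/util/cache.h by this commit:

#include <cstring>
#include <string>
#include <utility>

#include <glog/logging.h>

#include "kudu/util/cache.h"
#include "kudu/util/slice.h"

namespace kudu {

// Hypothetical helper: return the cached value for 'key', inserting 'value'
// on a miss.  Both Lookup() and Insert() now hand back a Cache::UniqueHandle,
// so there is no manual cache->Release(handle) call anywhere below.
std::string DemoLookupOrInsert(Cache* cache, const Slice& key,
                               const std::string& value) {
  Cache::UniqueHandle h(cache->Lookup(key, Cache::EXPECT_IN_CACHE));
  if (!h) {
    // Miss: reserve space for the entry, fill it in place, then commit it.
    Cache::UniquePendingHandle ph(cache->Allocate(
        key, static_cast<int>(value.size()), Cache::kAutomaticCharge));
    CHECK(ph);
    memcpy(cache->MutableValue(&ph), value.data(), value.size());
    h = cache->Insert(std::move(ph), /* eviction_callback= */ nullptr);
  }
  Slice cached(cache->Value(h));
  // 'h' is released automatically when it goes out of scope at the end of
  // this function, unpinning the cache entry.
  return std::string(reinterpret_cast<const char*>(cached.data()),
                     cached.size());
}

}  // namespace kudu

The same pinned-until-scope-exit behavior is what lets the callers in
block_cache.cc, code_cache.cc, file_cache.cc, and the tests below drop their
explicit Release() calls.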

diff --git a/src/kudu/cfile/block_cache.cc b/src/kudu/cfile/block_cache.cc
index b5b5c2c..9865776 100644
--- a/src/kudu/cfile/block_cache.cc
+++ b/src/kudu/cfile/block_cache.cc
@@ -135,32 +135,33 @@ Cache::MemoryType BlockCache::GetConfiguredCacheMemoryTypeOrDie() {
 }
 
 BlockCache::BlockCache()
-  : BlockCache(FLAGS_block_cache_capacity_mb * 1024 * 1024) {
+    : BlockCache(FLAGS_block_cache_capacity_mb * 1024 * 1024) {
 }
 
 BlockCache::BlockCache(size_t capacity)
-  : cache_(CreateCache(capacity)) {
+    : cache_(CreateCache(capacity)) {
 }
 
 BlockCache::PendingEntry BlockCache::Allocate(const CacheKey& key, size_t block_size) {
   Slice key_slice(reinterpret_cast<const uint8_t*>(&key), sizeof(key));
-  return PendingEntry(cache_.get(), cache_->Allocate(key_slice, block_size));
+  return PendingEntry(cache_->Allocate(key_slice, block_size));
 }
 
 bool BlockCache::Lookup(const CacheKey& key, Cache::CacheBehavior behavior,
-                        BlockCacheHandle *handle) {
-  Cache::Handle* h = cache_->Lookup(Slice(reinterpret_cast<const uint8_t*>(&key),
-                                          sizeof(key)), behavior);
-  if (h != nullptr) {
-    handle->SetHandle(cache_.get(), h);
+                        BlockCacheHandle* handle) {
+  auto h(cache_->Lookup(
+      Slice(reinterpret_cast<const uint8_t*>(&key), sizeof(key)), behavior));
+  if (h) {
+    handle->SetHandle(std::move(h));
+    return true;
   }
-  return h != nullptr;
+  return false;
 }
 
 void BlockCache::Insert(BlockCache::PendingEntry* entry, BlockCacheHandle* inserted) {
-  Cache::Handle* h = cache_->Insert(std::move(entry->handle_),
-                                    /* eviction_callback= */ nullptr);
-  inserted->SetHandle(cache_.get(), h);
+  auto h(cache_->Insert(std::move(entry->handle_),
+                        /* eviction_callback= */ nullptr));
+  inserted->SetHandle(std::move(h));
 }
 
 void BlockCache::StartInstrumentation(const scoped_refptr<MetricEntity>& metric_entity) {
diff --git a/src/kudu/cfile/block_cache.h b/src/kudu/cfile/block_cache.h
index ba700c1..e333066 100644
--- a/src/kudu/cfile/block_cache.h
+++ b/src/kudu/cfile/block_cache.h
@@ -21,9 +21,6 @@
 #include <cstdint>
 #include <utility>
 
-#include <gflags/gflags_declare.h>
-#include <glog/logging.h>
-
 #include "kudu/fs/block_id.h"
 #include "kudu/gutil/gscoped_ptr.h"
 #include "kudu/gutil/macros.h"
@@ -32,8 +29,6 @@
 #include "kudu/util/cache.h"
 #include "kudu/util/slice.h"
 
-DECLARE_string(block_cache_type);
-
 template <class T> class scoped_refptr;
 
 namespace kudu {
@@ -82,12 +77,11 @@ class BlockCache {
   class PendingEntry {
    public:
     PendingEntry()
-        : cache_(nullptr),
-          handle_(Cache::UniquePendingHandle(nullptr,
+        : handle_(Cache::UniquePendingHandle(nullptr,
                                              Cache::PendingHandleDeleter(nullptr))) {
     }
-    PendingEntry(Cache* cache, Cache::UniquePendingHandle handle)
-        : cache_(cache), handle_(std::move(handle)) {
+    explicit PendingEntry(Cache::UniquePendingHandle handle)
+        : handle_(std::move(handle)) {
     }
     PendingEntry(PendingEntry&& other) noexcept : PendingEntry() {
       *this = std::move(other);
@@ -111,13 +105,12 @@ class BlockCache {
 
     // Return the pointer into which the value should be written.
     uint8_t* val_ptr() {
-      return cache_->MutableValue(handle_.get());
+      return handle_.get_deleter().cache()->MutableValue(&handle_);
     }
 
    private:
     friend class BlockCache;
 
-    Cache* cache_;
     Cache::UniquePendingHandle handle_;
   };
 
@@ -180,27 +173,17 @@ class BlockCache {
 // Scoped reference to a block from the block cache.
 class BlockCacheHandle {
  public:
-  BlockCacheHandle() :
-    handle_(NULL)
-  {}
-
-  ~BlockCacheHandle() {
-    if (handle_ != NULL) {
-      Release();
-    }
+  BlockCacheHandle()
+      : handle_(Cache::UniqueHandle(nullptr, Cache::HandleDeleter(nullptr))) {
   }
 
-  void Release() {
-    CHECK_NOTNULL(cache_)->Release(CHECK_NOTNULL(handle_));
-    handle_ = NULL;
-  }
+  ~BlockCacheHandle() = default;
 
   // Swap this handle with another handle.
   // This can be useful to transfer ownership of a handle by swapping
   // with an empty BlockCacheHandle.
-  void swap(BlockCacheHandle *dst) {
-    std::swap(this->cache_, dst->cache_);
-    std::swap(this->handle_, dst->handle_);
+  void swap(BlockCacheHandle* dst) {
+    std::swap(handle_, dst->handle_);
   }
 
   // Return the data in the cached block.
@@ -208,39 +191,33 @@ class BlockCacheHandle {
   // NOTE: this slice is only valid until the block cache handle is
   // destructed or explicitly Released().
   Slice data() const {
-    return cache_->Value(handle_);
+    return handle_.get_deleter().cache()->Value(handle_);
   }
 
   bool valid() const {
-    return handle_ != NULL;
+    return static_cast<bool>(handle_);
   }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(BlockCacheHandle);
   friend class BlockCache;
 
-  void SetHandle(Cache *cache, Cache::Handle *handle) {
-    if (handle_ != NULL) Release();
-
-    cache_ = cache;
-    handle_ = handle;
+  void SetHandle(Cache::UniqueHandle handle) {
+    handle_ = std::move(handle);
   }
 
-  Cache::Handle *handle_;
-  Cache *cache_;
+  Cache::UniqueHandle handle_;
 };
 
 
 inline BlockCache::PendingEntry& BlockCache::PendingEntry::operator=(
     BlockCache::PendingEntry&& other) noexcept {
   reset();
-  std::swap(cache_, other.cache_);
   handle_ = std::move(other.handle_);
   return *this;
 }
 
 inline void BlockCache::PendingEntry::reset() {
-  cache_ = nullptr;
   handle_.reset();
 }
 
diff --git a/src/kudu/cfile/block_handle.h b/src/kudu/cfile/block_handle.h
index d1798ad..9839049 100644
--- a/src/kudu/cfile/block_handle.h
+++ b/src/kudu/cfile/block_handle.h
@@ -73,8 +73,8 @@ class BlockHandle {
         is_data_owner_(true) {
   }
 
-  explicit BlockHandle(BlockCacheHandle *dblk_data)
-    : is_data_owner_(false) {
+  explicit BlockHandle(BlockCacheHandle* dblk_data)
+      : is_data_owner_(false) {
     dblk_data_.swap(dblk_data);
   }
 
diff --git a/src/kudu/cfile/cfile-test.cc b/src/kudu/cfile/cfile-test.cc
index 82535fe..84f5144 100644
--- a/src/kudu/cfile/cfile-test.cc
+++ b/src/kudu/cfile/cfile-test.cc
@@ -78,6 +78,7 @@
 
 DECLARE_bool(cfile_write_checksums);
 DECLARE_bool(cfile_verify_checksums);
+DECLARE_string(block_cache_type);
 
 #if defined(__linux__)
 DECLARE_string(nvm_cache_path);
@@ -88,6 +89,12 @@ METRIC_DECLARE_counter(block_cache_hits_caching);
 
 METRIC_DECLARE_entity(server);
 
+
+using kudu::fs::BlockManager;
+using kudu::fs::CountingReadableBlock;
+using kudu::fs::CreateCorruptBlock;
+using kudu::fs::ReadableBlock;
+using kudu::fs::WritableBlock;
 using std::shared_ptr;
 using std::string;
 using std::unique_ptr;
@@ -97,12 +104,6 @@ using strings::Substitute;
 namespace kudu {
 namespace cfile {
 
-using fs::BlockManager;
-using fs::CountingReadableBlock;
-using fs::CreateCorruptBlock;
-using fs::ReadableBlock;
-using fs::WritableBlock;
-
 class TestCFile : public CFileTestBase {
  protected:
   template <class DataGeneratorType>
diff --git a/src/kudu/codegen/code_cache.cc b/src/kudu/codegen/code_cache.cc
index 393659c..9213a63 100644
--- a/src/kudu/codegen/code_cache.cc
+++ b/src/kudu/codegen/code_cache.cc
@@ -80,30 +80,27 @@ Status CodeCache::AddEntry(const scoped_refptr<JITWrapper>& value) {
   // failed, we'd just crash the process.
   auto pending(cache_->Allocate(Slice(key), val_len, /*charge = */1));
   CHECK(pending);
-  memcpy(cache_->MutableValue(pending.get()), &val, val_len);
+  memcpy(cache_->MutableValue(&pending), &val, val_len);
 
   // Because Cache only accepts void* values, we store just the JITWrapper*
   // and increase its ref count.
   value->AddRef();
 
   // Insert into cache and release the handle (we have a local copy of a refptr).
-  Cache::Handle* inserted = DCHECK_NOTNULL(cache_->Insert(
-      std::move(pending), eviction_callback_.get()));
-  cache_->Release(inserted);
+  auto inserted(cache_->Insert(std::move(pending), eviction_callback_.get()));
+  DCHECK(inserted);
   return Status::OK();
 }
 
 scoped_refptr<JITWrapper> CodeCache::Lookup(const Slice& key) {
   // Look up in Cache after generating key, returning NULL if not found.
-  Cache::Handle* found = cache_->Lookup(key, Cache::EXPECT_IN_CACHE);
-  if (!found) return scoped_refptr<JITWrapper>();
+  auto found(cache_->Lookup(key, Cache::EXPECT_IN_CACHE));
+  if (!found) {
+    return scoped_refptr<JITWrapper>();
+  }
 
   // Retrieve the value
-  scoped_refptr<JITWrapper> value = CacheValueToJITWrapper(cache_->Value(found));
-
-  // No need to hold on to handle after we have our reference-counted object.
-  cache_->Release(found);
-  return value;
+  return CacheValueToJITWrapper(cache_->Value(found));
 }
 
 } // namespace codegen
diff --git a/src/kudu/util/cache-bench.cc b/src/kudu/util/cache-bench.cc
index 6e95327..91026ae 100644
--- a/src/kudu/util/cache-bench.cc
+++ b/src/kudu/util/cache-bench.cc
@@ -115,17 +115,15 @@ class CacheBench : public KuduTest,
       char key_buf[sizeof(int_key)];
       memcpy(key_buf, &int_key, sizeof(int_key));
       Slice key_slice(key_buf, arraysize(key_buf));
-      Cache::Handle* h = cache_->Lookup(key_slice, Cache::EXPECT_IN_CACHE);
+      auto h(cache_->Lookup(key_slice, Cache::EXPECT_IN_CACHE));
       if (h) {
-        hits++;
+        ++hits;
       } else {
         auto ph(cache_->Allocate(
             key_slice, /* val_len=*/kEntrySize, /* charge=*/kEntrySize));
-        h = cache_->Insert(std::move(ph), nullptr);
+        cache_->Insert(std::move(ph), nullptr);
       }
-
-      cache_->Release(h);
-      lookups++;
+      ++lookups;
     }
     return {hits, lookups};
   }
diff --git a/src/kudu/util/cache-test.cc b/src/kudu/util/cache-test.cc
index 454f2a8..df106ec 100644
--- a/src/kudu/util/cache-test.cc
+++ b/src/kudu/util/cache-test.cc
@@ -81,12 +81,8 @@ class CacheBaseTest : public KuduTest,
   }
 
   int Lookup(int key) {
-    Cache::Handle* handle = cache_->Lookup(EncodeInt(key), Cache::EXPECT_IN_CACHE);
-    const int r = (handle == nullptr) ? -1 : DecodeInt(cache_->Value(handle));
-    if (handle != nullptr) {
-      cache_->Release(handle);
-    }
-    return r;
+    auto handle(cache_->Lookup(EncodeInt(key), Cache::EXPECT_IN_CACHE));
+    return handle ? DecodeInt(cache_->Value(handle)) : -1;
   }
 
   void Insert(int key, int value, int charge = 1) {
@@ -94,8 +90,8 @@ class CacheBaseTest : public KuduTest,
     std::string val_str = EncodeInt(value);
     auto handle(cache_->Allocate(key_str, val_str.size(), charge));
     CHECK(handle);
-    memcpy(cache_->MutableValue(handle.get()), val_str.data(), val_str.size());
-    cache_->Release(cache_->Insert(std::move(handle), this));
+    memcpy(cache_->MutableValue(&handle), val_str.data(), val_str.size());
+    cache_->Insert(std::move(handle), this);
   }
 
   void Erase(int key) {
@@ -283,15 +279,15 @@ TEST_P(CacheTest, Erase) {
 
 TEST_P(CacheTest, EntriesArePinned) {
   Insert(100, 101);
-  Cache::Handle* h1 = cache_->Lookup(EncodeInt(100), Cache::EXPECT_IN_CACHE);
+  auto h1 = cache_->Lookup(EncodeInt(100), Cache::EXPECT_IN_CACHE);
   ASSERT_EQ(101, DecodeInt(cache_->Value(h1)));
 
   Insert(100, 102);
-  Cache::Handle* h2 = cache_->Lookup(EncodeInt(100), Cache::EXPECT_IN_CACHE);
+  auto h2 = cache_->Lookup(EncodeInt(100), Cache::EXPECT_IN_CACHE);
   ASSERT_EQ(102, DecodeInt(cache_->Value(h2)));
   ASSERT_EQ(0, evicted_keys_.size());
 
-  cache_->Release(h1);
+  h1.reset();
   ASSERT_EQ(1, evicted_keys_.size());
   ASSERT_EQ(100, evicted_keys_[0]);
   ASSERT_EQ(101, evicted_values_[0]);
@@ -300,7 +296,7 @@ TEST_P(CacheTest, EntriesArePinned) {
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(1, evicted_keys_.size());
 
-  cache_->Release(h2);
+  h2.reset();
   ASSERT_EQ(2, evicted_keys_.size());
   ASSERT_EQ(100, evicted_keys_[1]);
   ASSERT_EQ(102, evicted_values_[1]);
diff --git a/src/kudu/util/cache.cc b/src/kudu/util/cache.cc
index 665626d..1c0378c 100644
--- a/src/kudu/util/cache.cc
+++ b/src/kudu/util/cache.cc
@@ -72,8 +72,6 @@ const Cache::IterationFunc Cache::kIterateOverAllEntriesFunc = [](
 
 namespace {
 
-typedef simple_spinlock MutexType;
-
 // Recency list cache implementations (FIFO, LRU, etc.)
 
 // Recency list handle. An entry is a variable length heap-allocated structure.
@@ -272,7 +270,7 @@ class CacheShard {
   size_t capacity_;
 
   // mutex_ protects the following state.
-  MutexType mutex_;
+  simple_spinlock mutex_;
   size_t usage_;
 
   // Dummy head of recency list.
@@ -401,7 +399,7 @@ Cache::Handle* CacheShard<policy>::Lookup(const Slice& key,
                                           bool caching) {
   RLHandle* e;
   {
-    std::lock_guard<MutexType> l(mutex_);
+    std::lock_guard<decltype(mutex_)> l(mutex_);
     e = table_.Lookup(key, hash);
     if (e != nullptr) {
       e->refs.fetch_add(1, std::memory_order_relaxed);
@@ -441,7 +439,7 @@ Cache::Handle* CacheShard<policy>::Insert(
 
   RLHandle* to_remove_head = nullptr;
   {
-    std::lock_guard<MutexType> l(mutex_);
+    std::lock_guard<decltype(mutex_)> l(mutex_);
 
     RL_Append(handle);
 
@@ -481,7 +479,7 @@ void CacheShard<policy>::Erase(const Slice& key, uint32_t hash) {
   RLHandle* e;
   bool last_reference = false;
   {
-    std::lock_guard<MutexType> l(mutex_);
+    std::lock_guard<decltype(mutex_)> l(mutex_);
     e = table_.Remove(key, hash);
     if (e != nullptr) {
       RL_Remove(e);
@@ -502,7 +500,7 @@ size_t CacheShard<policy>::Invalidate(const Cache::InvalidationControl& ctl) {
   RLHandle* to_remove_head = nullptr;
 
   {
-    std::lock_guard<MutexType> l(mutex_);
+    std::lock_guard<decltype(mutex_)> l(mutex_);
 
     // rl_.next is the oldest (a.k.a. least relevant) entry in the recency list.
     RLHandle* h = rl_.next;
@@ -550,29 +548,6 @@ int DetermineShardBits() {
 
 template<Cache::EvictionPolicy policy>
 class ShardedCache : public Cache {
- private:
-  shared_ptr<MemTracker> mem_tracker_;
-  unique_ptr<CacheMetrics> metrics_;
-  vector<CacheShard<policy>*> shards_;
-
-  // Number of bits of hash used to determine the shard.
-  const int shard_bits_;
-
-  // Protects 'metrics_'. Used only when metrics are set, to ensure
-  // that they are set only once in test environments.
-  MutexType metrics_lock_;
-
-  static inline uint32_t HashSlice(const Slice& s) {
-    return util_hash::CityHash64(
-      reinterpret_cast<const char *>(s.data()), s.size());
-  }
-
-  uint32_t Shard(uint32_t hash) {
-    // Widen to uint64 before shifting, or else on a single CPU,
-    // we would try to shift a uint32_t by 32 bits, which is undefined.
-    return static_cast<uint64_t>(hash) >> (32 - shard_bits_);
-  }
-
  public:
   explicit ShardedCache(size_t capacity, const string& id)
         : shard_bits_(DetermineShardBits()) {
@@ -596,32 +571,12 @@ class ShardedCache : public Cache {
     STLDeleteElements(&shards_);
   }
 
-  Handle* Insert(UniquePendingHandle handle,
-                 Cache::EvictionCallback* eviction_callback) override {
-    RLHandle* h = reinterpret_cast<RLHandle*>(DCHECK_NOTNULL(handle.release()));
-    return shards_[Shard(h->hash)]->Insert(h, eviction_callback);
-  }
-  Handle* Lookup(const Slice& key, CacheBehavior caching) override {
-    const uint32_t hash = HashSlice(key);
-    return shards_[Shard(hash)]->Lookup(key, hash, caching == EXPECT_IN_CACHE);
-  }
-  void Release(Handle* handle) override {
-    RLHandle* h = reinterpret_cast<RLHandle*>(handle);
-    shards_[Shard(h->hash)]->Release(handle);
-  }
-  void Erase(const Slice& key) override {
-    const uint32_t hash = HashSlice(key);
-    shards_[Shard(hash)]->Erase(key, hash);
-  }
-  Slice Value(Handle* handle) override {
-    return reinterpret_cast<RLHandle*>(handle)->value();
-  }
   void SetMetrics(std::unique_ptr<CacheMetrics> metrics) override {
     // TODO(KUDU-2165): reuse of the Cache singleton across multiple MiniCluster servers
     // causes TSAN errors. So, we'll ensure that metrics only get attached once, from
     // whichever server starts first. This has the downside that, in test builds, we won't
     // get accurate cache metrics, but that's probably better than spurious failures.
-    std::lock_guard<simple_spinlock> l(metrics_lock_);
+    std::lock_guard<decltype(metrics_lock_)> l(metrics_lock_);
     if (metrics_) {
       CHECK(IsGTest()) << "Metrics should only be set once per Cache singleton";
       return;
@@ -632,6 +587,30 @@ class ShardedCache : public Cache {
     }
   }
 
+  UniqueHandle Lookup(const Slice& key, CacheBehavior caching) override {
+    const uint32_t hash = HashSlice(key);
+    return UniqueHandle(
+        shards_[Shard(hash)]->Lookup(key, hash, caching == EXPECT_IN_CACHE),
+        Cache::HandleDeleter(this));
+  }
+
+  void Erase(const Slice& key) override {
+    const uint32_t hash = HashSlice(key);
+    shards_[Shard(hash)]->Erase(key, hash);
+  }
+
+  Slice Value(const UniqueHandle& handle) const override {
+    return reinterpret_cast<const RLHandle*>(handle.get())->value();
+  }
+
+  UniqueHandle Insert(UniquePendingHandle handle,
+                      Cache::EvictionCallback* eviction_callback) override {
+    RLHandle* h = reinterpret_cast<RLHandle*>(DCHECK_NOTNULL(handle.release()));
+    return UniqueHandle(
+        shards_[Shard(h->hash)]->Insert(h, eviction_callback),
+        Cache::HandleDeleter(this));
+  }
+
   UniquePendingHandle Allocate(Slice key, int val_len, int charge) override {
     int key_len = key.size();
     DCHECK_GE(key_len, 0);
@@ -657,13 +636,8 @@ class ShardedCache : public Cache {
     return h;
   }
 
-  void Free(PendingHandle* h) override {
-    uint8_t* data = reinterpret_cast<uint8_t*>(h);
-    delete [] data;
-  }
-
-  uint8_t* MutableValue(PendingHandle* h) override {
-    return reinterpret_cast<RLHandle*>(h)->mutable_val_ptr();
+  uint8_t* MutableValue(UniquePendingHandle* handle) override {
+    return reinterpret_cast<RLHandle*>(handle->get())->mutable_val_ptr();
   }
 
   size_t Invalidate(const InvalidationControl& ctl) override {
@@ -673,6 +647,40 @@ class ShardedCache : public Cache {
     }
     return invalidated_count;
   }
+
+ protected:
+  void Release(Handle* handle) override {
+    RLHandle* h = reinterpret_cast<RLHandle*>(handle);
+    shards_[Shard(h->hash)]->Release(handle);
+  }
+
+  void Free(PendingHandle* h) override {
+    uint8_t* data = reinterpret_cast<uint8_t*>(h);
+    delete [] data;
+  }
+
+ private:
+  static inline uint32_t HashSlice(const Slice& s) {
+    return util_hash::CityHash64(
+      reinterpret_cast<const char *>(s.data()), s.size());
+  }
+
+  uint32_t Shard(uint32_t hash) {
+    // Widen to uint64 before shifting, or else on a single CPU,
+    // we would try to shift a uint32_t by 32 bits, which is undefined.
+    return static_cast<uint64_t>(hash) >> (32 - shard_bits_);
+  }
+
+  shared_ptr<MemTracker> mem_tracker_;
+  unique_ptr<CacheMetrics> metrics_;
+  vector<CacheShard<policy>*> shards_;
+
+  // Number of bits of hash used to determine the shard.
+  const int shard_bits_;
+
+  // Protects 'metrics_'. Used only when metrics are set, to ensure
+  // that they are set only once in test environments.
+  simple_spinlock metrics_lock_;
 };
 
 }  // end anonymous namespace
diff --git a/src/kudu/util/cache.h b/src/kudu/util/cache.h
index 628b452..6c90b0f 100644
--- a/src/kudu/util/cache.h
+++ b/src/kudu/util/cache.h
@@ -34,7 +34,6 @@ struct CacheMetrics;
 
 class Cache {
  public:
-
   // Type of memory backing the cache's storage.
   enum class MemoryType {
     DRAM,
@@ -57,39 +56,22 @@ class Cache {
   class EvictionCallback {
    public:
     virtual void EvictedEntry(Slice key, Slice value) = 0;
-    virtual ~EvictionCallback() {}
+    virtual ~EvictionCallback() = default;
   };
 
-  Cache() { }
+  Cache() = default;
 
   // Destroys all existing entries by calling the "deleter"
   // function that was passed to the constructor.
   virtual ~Cache();
 
+  // Set the cache metrics to update corresponding counters accordingly.
+  virtual void SetMetrics(std::unique_ptr<CacheMetrics> metrics) = 0;
+
   // Opaque handle to an entry stored in the cache.
   struct Handle { };
 
-  // Custom handle "deleter", primarily intended for use with std::unique_ptr.
-  //
-  // Sample usage:
-  //
-  //   unique_ptr<Cache> cache(NewCache(...));
-  //   ...
-  //   {
-  //     unique_ptr<Cache::Handle, Cache::HandleDeleter> h(
-  //       cache->Lookup(...), Cache::HandleDeleter(cache));
-  //     ...
-  //   } // 'h' is automatically released here
-  //
-  // Or:
-  //
-  //   unique_ptr<Cache> cache(NewCache(...));
-  //   ...
-  //   {
-  //     Cache::UniqueHandle h(cache->Lookup(...), Cache::HandleDeleter(cache));
-  //     ...
-  //   } // 'h' is automatically released here
-  //
+  // Custom deleter: intended for use with std::unique_ptr<Handle>.
   class HandleDeleter {
    public:
     explicit HandleDeleter(Cache* c)
@@ -102,14 +84,22 @@ class Cache {
       }
     }
 
+    Cache* cache() const {
+      return c_;
+    }
+
    private:
     Cache* c_;
   };
+
+  // UniqueHandle -- a wrapper around opaque Handle structure to facilitate
+  // automatic reference counting of cache's handles.
   typedef std::unique_ptr<Handle, HandleDeleter> UniqueHandle;
 
   // Opaque handle to an entry which is being prepared to be added to the cache.
   struct PendingHandle { };
 
+  // Custom deleter: intended for use with std::unique_ptr<PendingHandle>.
   class PendingHandleDeleter {
    public:
     explicit PendingHandleDeleter(Cache* c)
@@ -117,12 +107,21 @@ class Cache {
     }
 
     void operator()(Cache::PendingHandle* h) const {
-      c_->Free(h);
+      if (h != nullptr) {
+        c_->Free(h);
+      }
+    }
+
+    Cache* cache() const {
+      return c_;
     }
 
    private:
     Cache* c_;
   };
+
+  // UniquePendingHandle -- a wrapper around opaque PendingHandle structure
+  // to facilitate automatic reference counting newly allocated cache's handles.
   typedef std::unique_ptr<PendingHandle, PendingHandleDeleter> UniquePendingHandle;
 
   // Passing EXPECT_IN_CACHE will increment the hit/miss metrics that track the number of times
@@ -137,29 +136,37 @@ class Cache {
 
   // If the cache has no mapping for "key", returns NULL.
   //
-  // Else return a handle that corresponds to the mapping.  The caller
-  // must call this->Release(handle) when the returned mapping is no
-  // longer needed.
-  virtual Handle* Lookup(const Slice& key, CacheBehavior caching) = 0;
-
-  // Release a mapping returned by a previous Lookup().
-  // REQUIRES: handle must not have been released yet.
-  // REQUIRES: handle must have been returned by a method on *this.
-  virtual void Release(Handle* handle) = 0;
-
-  // Return the value encapsulated in a handle returned by a
-  // successful Lookup().
-  // REQUIRES: handle must not have been released yet.
-  // REQUIRES: handle must have been returned by a method on *this.
-  virtual Slice Value(Handle* handle) = 0;
+  // Else return a handle that corresponds to the mapping.
+  //
+  // Sample usage:
+  //
+  //   unique_ptr<Cache> cache(NewCache(...));
+  //   ...
+  //   {
+  //     Cache::UniqueHandle h(cache->Lookup(...)));
+  //     ...
+  //   } // 'h' is automatically released here
+  //
+  // Or:
+  //
+  //   unique_ptr<Cache> cache(NewCache(...));
+  //   ...
+  //   {
+  //     auto h(cache->Lookup(...)));
+  //     ...
+  //   } // 'h' is automatically released here
+  //
+  virtual UniqueHandle Lookup(const Slice& key, CacheBehavior caching) = 0;
 
   // If the cache contains entry for key, erase it.  Note that the
   // underlying entry will be kept around until all existing handles
   // to it have been released.
   virtual void Erase(const Slice& key) = 0;
 
-  // Set the cache metrics to update corresponding counters accordingly.
-  virtual void SetMetrics(std::unique_ptr<CacheMetrics> metrics) = 0;
+  // Return the value encapsulated in a raw handle returned by a successful
+  // Lookup().
+  virtual Slice Value(const UniqueHandle& handle) const = 0;
+
 
   // ------------------------------------------------------------
   // Insertion path
@@ -173,13 +180,13 @@ class Cache {
   // For example:
   //
   //   auto ph(cache_->Allocate("my entry", value_size, charge));
-  //   if (!ReadDataFromDisk(cache_->MutableValue(ph.get())).ok()) {
+  //   if (!ReadDataFromDisk(cache_->MutableValue(&ph)).ok()) {
   //     ... error handling ...
   //     return;
   //   }
-  //   Handle* h = cache_->Insert(std::move(ph), my_eviction_callback);
+  //   UniqueHandle h(cache_->Insert(std::move(ph), my_eviction_callback));
   //   ...
-  //   cache_->Release(h);
+  //   // 'h' is automatically released.
 
   // Indicates that the charge of an item in the cache should be calculated
   // based on its memory consumption.
@@ -189,7 +196,7 @@ class Cache {
   //
   // The provided 'key' is copied into the resulting handle object.
   // The allocated handle has enough space such that the value can
-  // be written into cache_->MutableValue(handle).
+  // be written into cache_->MutableValue(&handle).
   //
   // If 'charge' is not 'kAutomaticCharge', then the cache capacity will be charged
   // the explicit amount. This is useful when caching items that are small but need to
@@ -214,25 +221,20 @@ class Cache {
     return Allocate(key, val_len, kAutomaticCharge);
   }
 
-  virtual uint8_t* MutableValue(PendingHandle* handle) = 0;
+  virtual uint8_t* MutableValue(UniquePendingHandle* handle) = 0;
 
   // Commit a prepared entry into the cache.
   //
-  // Returns a handle that corresponds to the mapping.  The caller
-  // must call this->Release(handle) when the returned mapping is no
-  // longer needed. This method always succeeds and returns a non-null
-  // entry, since the space was reserved above.
+  // Returns a handle that corresponds to the mapping. This method always
+  // succeeds and returns a non-null entry, since the space was reserved above.
   //
   // The 'pending' entry passed here should have been allocated using
   // Cache::Allocate() above.
   //
   // If 'eviction_callback' is non-NULL, then it will be called when the
   // entry is later evicted or when the cache shuts down.
-  virtual Handle* Insert(UniquePendingHandle pending,
-                         EvictionCallback* eviction_callback) = 0;
-
-  // Free 'ptr', which must have been previously allocated using 'Allocate'.
-  virtual void Free(PendingHandle* ptr) = 0;
+  virtual UniqueHandle Insert(UniquePendingHandle pending,
+                              EvictionCallback* eviction_callback) = 0;
 
   // Forward declaration to simplify the layout of types/typedefs needed for the
   // Invalidate() method while trying to adhere to the code style guide.
@@ -322,6 +324,15 @@ class Cache {
     const IterationFunc iteration_func;
   };
 
+ protected:
+  // Release a mapping returned by a previous Lookup(), using raw handle.
+  // REQUIRES: handle must not have been released yet.
+  // REQUIRES: handle must have been returned by a method on *this.
+  virtual void Release(Handle* handle) = 0;
+
+  // Free 'ptr', which must have been previously allocated using 'Allocate'.
+  virtual void Free(PendingHandle* ptr) = 0;
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Cache);
 };
diff --git a/src/kudu/util/file_cache-test.cc b/src/kudu/util/file_cache-test.cc
index 94c09eb..4abd743 100644
--- a/src/kudu/util/file_cache-test.cc
+++ b/src/kudu/util/file_cache-test.cc
@@ -156,10 +156,8 @@ TYPED_TEST(FileCacheTest, TestBasicOperations) {
     ASSERT_OK(this->cache_->OpenExistingFile(kFile1, &f2));
     NO_FATALS(this->AssertFdsAndDescriptors(1, 1));
     {
-      Cache::UniqueHandle uh(
-          this->cache_->cache_->Lookup(kFile1, Cache::EXPECT_IN_CACHE),
-          Cache::HandleDeleter(this->cache_->cache_.get()));
-      ASSERT_TRUE(uh.get());
+      auto uh(this->cache_->cache_->Lookup(kFile1, Cache::EXPECT_IN_CACHE));
+      ASSERT_TRUE(uh);
     }
 
     // Open a second file. This will create a new descriptor, but evict the fd
@@ -168,16 +166,12 @@ TYPED_TEST(FileCacheTest, TestBasicOperations) {
     ASSERT_OK(this->cache_->OpenExistingFile(kFile2, &f3));
     NO_FATALS(this->AssertFdsAndDescriptors(1, 2));
     {
-      Cache::UniqueHandle uh(
-          this->cache_->cache_->Lookup(kFile1, Cache::EXPECT_IN_CACHE),
-          Cache::HandleDeleter(this->cache_->cache_.get()));
-      ASSERT_FALSE(uh.get());
+      auto uh(this->cache_->cache_->Lookup(kFile1, Cache::EXPECT_IN_CACHE));
+      ASSERT_FALSE(uh);
     }
     {
-      Cache::UniqueHandle uh(
-          this->cache_->cache_->Lookup(kFile2, Cache::EXPECT_IN_CACHE),
-          Cache::HandleDeleter(this->cache_->cache_.get()));
-      ASSERT_TRUE(uh.get());
+      auto uh(this->cache_->cache_->Lookup(kFile2, Cache::EXPECT_IN_CACHE));
+      ASSERT_TRUE(uh);
     }
   }
 
diff --git a/src/kudu/util/file_cache.cc b/src/kudu/util/file_cache.cc
index effe23c..ddb9993 100644
--- a/src/kudu/util/file_cache.cc
+++ b/src/kudu/util/file_cache.cc
@@ -124,10 +124,10 @@ class BaseDescriptor {
     // equivalent to the max number of fds.
     auto pending(cache()->Allocate(filename(), sizeof(file_ptr), 1));
     CHECK(pending);
-    memcpy(cache()->MutableValue(pending.get()), &file_ptr, sizeof(file_ptr));
-    return ScopedOpenedDescriptor<FileType>(this, Cache::UniqueHandle(
-        cache()->Insert(std::move(pending), file_cache_->eviction_cb_.get()),
-        Cache::HandleDeleter(cache())));
+    memcpy(cache()->MutableValue(&pending), &file_ptr, sizeof(file_ptr));
+    return ScopedOpenedDescriptor<FileType>(
+        this, cache()->Insert(std::move(pending),
+                              file_cache_->eviction_cb_.get()));
   }
 
   // Retrieves a pointer to an open file object from the file cache with the
@@ -136,9 +136,8 @@ class BaseDescriptor {
   // Returns a handle to the looked up entry. The handle may or may not contain
   // an open file, depending on whether the cache hit or missed.
   ScopedOpenedDescriptor<FileType> LookupFromCache() const {
-    return ScopedOpenedDescriptor<FileType>(this, Cache::UniqueHandle(
-        cache()->Lookup(filename(), Cache::EXPECT_IN_CACHE),
-        Cache::HandleDeleter(cache())));
+    return ScopedOpenedDescriptor<FileType>(
+        this, cache()->Lookup(filename(), Cache::EXPECT_IN_CACHE));
   }
 
   // Mark this descriptor as to-be-deleted later.
@@ -205,7 +204,7 @@ class ScopedOpenedDescriptor {
 
   FileType* file() const {
     DCHECK(opened());
-    return CacheValueToFileType<FileType>(desc_->cache()->Value(handle_.get()));
+    return CacheValueToFileType<FileType>(desc_->cache()->Value(handle_));
   }
 
  private:
diff --git a/src/kudu/util/nvm_cache.cc b/src/kudu/util/nvm_cache.cc
index 4cf2828..4d3497b 100644
--- a/src/kudu/util/nvm_cache.cc
+++ b/src/kudu/util/nvm_cache.cc
@@ -534,14 +534,18 @@ class ShardedLRUCache : public Cache {
     vmem_delete(vmp_);
   }
 
-  virtual Handle* Insert(UniquePendingHandle handle,
-                         Cache::EvictionCallback* eviction_callback) OVERRIDE {
+  virtual UniqueHandle Insert(UniquePendingHandle handle,
+                              Cache::EvictionCallback* eviction_callback) OVERRIDE {
     LRUHandle* h = reinterpret_cast<LRUHandle*>(DCHECK_NOTNULL(handle.release()));
-    return shards_[Shard(h->hash)]->Insert(h, eviction_callback);
+    return UniqueHandle(
+        shards_[Shard(h->hash)]->Insert(h, eviction_callback),
+        Cache::HandleDeleter(this));
   }
-  virtual Handle* Lookup(const Slice& key, CacheBehavior caching) OVERRIDE {
+  virtual UniqueHandle Lookup(const Slice& key, CacheBehavior caching) OVERRIDE {
     const uint32_t hash = HashSlice(key);
-    return shards_[Shard(hash)]->Lookup(key, hash, caching == EXPECT_IN_CACHE);
+    return UniqueHandle(
+        shards_[Shard(hash)]->Lookup(key, hash, caching == EXPECT_IN_CACHE),
+        Cache::HandleDeleter(this));
   }
   virtual void Release(Handle* handle) OVERRIDE {
     LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
@@ -551,11 +555,11 @@ class ShardedLRUCache : public Cache {
     const uint32_t hash = HashSlice(key);
     shards_[Shard(hash)]->Erase(key, hash);
   }
-  virtual Slice Value(Handle* handle) OVERRIDE {
-    return reinterpret_cast<LRUHandle*>(handle)->value();
+  virtual Slice Value(const UniqueHandle& handle) const OVERRIDE {
+    return reinterpret_cast<const LRUHandle*>(handle.get())->value();
   }
-  virtual uint8_t* MutableValue(PendingHandle* handle) OVERRIDE {
-    return reinterpret_cast<LRUHandle*>(handle)->val_ptr();
+  virtual uint8_t* MutableValue(UniquePendingHandle* handle) OVERRIDE {
+    return reinterpret_cast<LRUHandle*>(handle->get())->val_ptr();
   }
 
   virtual void SetMetrics(unique_ptr<CacheMetrics> metrics) OVERRIDE {
diff --git a/src/kudu/util/ttl_cache.h b/src/kudu/util/ttl_cache.h
index 29516a4..cff37f4 100644
--- a/src/kudu/util/ttl_cache.h
+++ b/src/kudu/util/ttl_cache.h
@@ -162,13 +162,11 @@ class TTLCache {
   // handle, but a cached value won't be destroyed until the handle goes
   // out of scope.
   EntryHandle Get(const K& key) {
-    Cache::UniqueHandle h(cache_->Lookup(key, Cache::EXPECT_IN_CACHE),
-                          Cache::HandleDeleter(cache_.get()));
+    auto h(cache_->Lookup(key, Cache::EXPECT_IN_CACHE));
     if (!h) {
       return EntryHandle();
     }
-    auto* entry_ptr = reinterpret_cast<Entry*>(
-        cache_->Value(h.get()).mutable_data());
+    auto* entry_ptr = reinterpret_cast<Entry*>(cache_->Value(h).mutable_data());
     DCHECK(entry_ptr);
     if (entry_ptr->exp_time < MonoTime::Now()) {
       // If the entry has expired already, return null handle. The underlying
@@ -196,12 +194,11 @@ class TTLCache {
                   int charge = Cache::kAutomaticCharge) {
     auto pending(cache_->Allocate(key, sizeof(Entry), charge));
     CHECK(pending);
-    Entry* entry = reinterpret_cast<Entry*>(cache_->MutableValue(pending.get()));
+    Entry* entry = reinterpret_cast<Entry*>(cache_->MutableValue(&pending));
     entry->val_ptr = val.get();
     entry->exp_time = MonoTime::Now() + entry_ttl_;
     // Insert() evicts already existing entry with the same key, if any.
-    Cache::UniqueHandle h(cache_->Insert(std::move(pending), eviction_cb_.get()),
-                          Cache::HandleDeleter(cache_.get()));
+    auto h(cache_->Insert(std::move(pending), eviction_cb_.get()));
     // The cache takes care of the entry from this point: deallocation of the
     // resources passed via 'val' parameter is performed by the eviction callback.
     return EntryHandle(DCHECK_NOTNULL(val.release()), std::move(h));
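
A side note on the block_cache.h hunk above: since HandleDeleter (and
PendingHandleDeleter) now expose cache(), a wrapper class no longer needs to
keep a separate Cache* next to the handle.  A minimal sketch of that pattern,
with an illustrative class name that is not part of the patch:

#include <utility>

#include "kudu/util/cache.h"
#include "kudu/util/slice.h"

namespace kudu {

// Illustrative wrapper: the owning Cache is recovered through the handle's
// deleter, so a single UniqueHandle member carries both the pinned entry and
// the context needed to read it.
class ExampleCachedValue {
 public:
  explicit ExampleCachedValue(Cache::UniqueHandle handle)
      : handle_(std::move(handle)) {
  }

  bool valid() const {
    return static_cast<bool>(handle_);
  }

  Slice data() const {
    // Same idiom as BlockCacheHandle::data() in the block_cache.h hunk above.
    return handle_.get_deleter().cache()->Value(handle_);
  }

 private:
  Cache::UniqueHandle handle_;
};

}  // namespace kudu

This is how BlockCacheHandle and BlockCache::PendingEntry shed their cache_
members in this commit.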

