trafficserver-commits mailing list archives

From bc...@apache.org
Subject [trafficserver] branch 8.0.x updated: MemArena refresh for better memory handling.
Date Wed, 06 Jun 2018 21:01:13 GMT
This is an automated email from the ASF dual-hosted git repository.

bcall pushed a commit to branch 8.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/8.0.x by this push:
     new d1c022d  MemArena refresh for better memory handling.
d1c022d is described below

commit d1c022d10cf4b065f3772fbd627e963d70dd1ff3
Author: Alan M. Carroll <amc@apache.org>
AuthorDate: Mon May 28 09:47:58 2018 -0500

    MemArena refresh for better memory handling.
    
    (cherry picked from commit 511906adf8d6152fc4abf8a4c7388438a94d34c7)
---
 lib/ts/MemArena.cc                     | 136 +++++++++++++--------------------
 lib/ts/MemArena.h                      |  80 ++++++++++++++-----
 lib/ts/unit-tests/test_IntrusivePtr.cc |  86 +++++++++++++++++++++
 lib/ts/unit-tests/test_MemArena.cc     | 101 ++++++++++++------------
 lib/tsconfig/IntrusivePtr.h            | 121 ++++++++++++++++-------------
 5 files changed, 317 insertions(+), 207 deletions(-)

diff --git a/lib/ts/MemArena.cc b/lib/ts/MemArena.cc
index e1f3b9d..9b3db24 100644
--- a/lib/ts/MemArena.cc
+++ b/lib/ts/MemArena.cc
@@ -24,113 +24,87 @@
 
 #include <algorithm>
 
-#include "MemArena.h"
+#include <ts/MemArena.h>
 #include <ts/ink_memory.h>
 #include <ts/ink_assert.h>
 
 using namespace ts;
 
-/**
-    Allocates a new internal block of memory. If there are no existing blocks, this becomes the head of the
-     ll. If there are existing allocations, the new block is inserted in the current list.
-     If @custom == true, the new block is pushed into the generation but @current doesn't change.
-        @custom == false, the new block is pushed to the head and becomes the @current internal block.
-  */
-inline MemArena::Block *
-MemArena::newInternalBlock(size_t n, bool custom)
+void
+MemArena::Block::operator delete(void *ptr)
 {
-  // Allocate Block header and actual underlying memory together for locality and fewer calls.
-  // ALLOC_HEADER_SIZE to account for malloc/free headers to try to minimize pages required.
-  static constexpr size_t FREE_SPACE_PER_PAGE = DEFAULT_PAGE_SIZE - sizeof(Block) - ALLOC_HEADER_SIZE;
-  static_assert(ALLOC_MIN_SIZE > ALLOC_HEADER_SIZE,
-                "ALLOC_MIN_SIZE must be larger than ALLOC_HEADER_SIZE to ensure positive
allocation request size.");
-
-  // If post-freeze or reserved, bump up block size then clear.
-  n               = std::max({n, next_block_size, ALLOC_MIN_SIZE});
-  next_block_size = 0;
-
-  if (n <= FREE_SPACE_PER_PAGE) { // will fit within one page, just allocate.
-    n += sizeof(Block);           // can just allocate that much with the Block.
-  } else {
-    // Round up to next power of 2 and allocate that.
-    --n;
-    n |= n >> 1;
-    n |= n >> 2;
-    n |= n >> 4;
-    n |= n >> 8;
-    n |= n >> 16;
-    n |= n >> 32;
-    ++n;                    // power of 2 now.
-    n -= ALLOC_HEADER_SIZE; // clip presumed malloc header size.
-  }
-
-  // Allocate space for the Block instance and the request memory.
-  std::shared_ptr<Block> block(new (ats_malloc(n)) Block(n - sizeof(Block)));
+  ::free(ptr);
+}
 
-  if (current) {
-    if (!custom) {
-      block->next = current;
-      current     = block;
-    } else {
-      // Situation where we do not have enough space for a large block of memory. We don't want
-      //  to update @current because it would be wasting memory. Create a new block for the entire
-      //  allocation and just add it to the generation.
-      block->next   = current->next; // here, current always exists.
-      current->next = block;
-    }
-  } else { // empty
-    current = block;
+MemArena::BlockPtr
+MemArena::make_block(size_t n)
+{
+  // If post-freeze or reserved, allocate at least that much.
+  n               = std::max<size_t>(n, next_block_size);
+  next_block_size = 0; // did this, clear for next time.
+  // Add in overhead and round up to paragraph units.
+  n = Paragraph{round_up(n + ALLOC_HEADER_SIZE + sizeof(Block))};
+  // If a page or more, round up to page unit size and clip back to account for alloc header.
+  if (n >= Page::SCALE) {
+    n = Page{round_up(n)} - ALLOC_HEADER_SIZE;
   }
 
-  return block.get();
+  // Allocate space for the Block instance and the request memory and construct a Block at the front.
+  // In theory this could use ::operator new(n) but this causes a size mismatch during ::operator delete.
+  // Easier to use malloc and not carry a memory block size value around.
+  return BlockPtr(new (::malloc(n)) Block(n - sizeof(Block)));
 }
 
 MemArena::MemArena(size_t n)
 {
-  next_block_size = 0; // don't force larger size.
-  this->newInternalBlock(n, true);
+  next_block_size = 0; // Don't use default size.
+  current         = this->make_block(n);
 }
 
-/**
-    Returns a span of memory of @n bytes. If necessary, alloc will create a new internal block
-     of memory in order to serve the required number of bytes.
- */
 MemSpan
 MemArena::alloc(size_t n)
 {
-  Block *block = nullptr;
-
+  MemSpan zret;
   current_alloc += n;
 
   if (!current) {
-    block = this->newInternalBlock(n, false);
-  } else {
-    if (current->size - current->allocated /* remaining size */ < n) {
-      if (n >= DEFAULT_PAGE_SIZE && n >= (current->size / 2)) {
-        block = this->newInternalBlock(n, true);
-      } else {
-        block = this->newInternalBlock(current->size * 2, false);
-      }
+    current = this->make_block(n);
+    zret    = current->alloc(n);
+  } else if (n > current->remaining()) { // too big, need another block
+    if (next_block_size < n) {
+      next_block_size = 2 * current->size;
+    }
+    BlockPtr block = this->make_block(n);
+    // For the new @a current, pick the block which will have the most free space after taking
+    // the request space out of the new block.
+    zret = block->alloc(n);
+    if (block->remaining() > current->remaining()) {
+      block->next = current;
+      current     = block;
+#if defined(__clang_analyzer__)
+      // Defeat another clang analyzer false positive. Unit tests validate the code is correct.
+      ink_assert(current.use_count() > 1);
+#endif
     } else {
-      block = current.get();
+      block->next   = current->next;
+      current->next = block;
+#if defined(__clang_analyzer__)
+      // Defeat another clang analyzer false positive. Unit tests validate the code is correct.
+      ink_assert(block.use_count() > 1);
+#endif
     }
+  } else {
+    zret = current->alloc(n);
   }
-
-  ink_assert(block->data() != nullptr);
-  ink_assert(block->size >= n);
-
-  auto zret = block->remnant().prefix(n);
-  block->allocated += n;
-
   return zret;
 }
 
 MemArena &
 MemArena::freeze(size_t n)
 {
-  prev            = current;
-  prev_alloc      = current_alloc;
-  current         = nullptr;
+  prev       = current;
+  prev_alloc = current_alloc;
+  current.reset();
   next_block_size = n ? n : current_alloc;
   current_alloc   = 0;
 
@@ -141,7 +115,7 @@ MemArena &
 MemArena::thaw()
 {
   prev_alloc = 0;
-  prev       = nullptr;
+  prev.reset();
   return *this;
 }
 
@@ -165,9 +139,9 @@ MemArena::contains(const void *ptr) const
 MemArena &
 MemArena::clear()
 {
-  prev          = nullptr;
-  prev_alloc    = 0;
-  current       = nullptr;
+  prev.reset();
+  prev_alloc = 0;
+  current.reset();
   current_alloc = 0;
 
   return *this;
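
For orientation, a minimal sketch of the arena lifecycle that the reworked MemArena.cc above implements, using only calls that appear in this patch (the function name sketch_arena() is illustrative, not part of the change):

#include <ts/MemArena.h>
#include <cstring>

void
sketch_arena()
{
  ts::MemArena arena{4096};          // first block sized for roughly a page
  ts::MemSpan s = arena.alloc(512);  // carve 512 bytes out of the current block
  memset(s.data(), 0, s.size());     // the span is usable immediately

  arena.freeze();                    // current blocks become the frozen "prev" generation
  ts::MemSpan t = arena.alloc(512);  // allocations now go to a fresh generation
  arena.thaw();                      // release the frozen generation; t stays valid

  arena.clear();                     // drop all generations, including t's block
}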
diff --git a/lib/ts/MemArena.h b/lib/ts/MemArena.h
index b635689..ad10cee 100644
--- a/lib/ts/MemArena.h
+++ b/lib/ts/MemArena.h
@@ -23,9 +23,12 @@
 
 #pragma once
 
+#include <new>
 #include <mutex>
 #include <memory>
 #include <ts/MemSpan.h>
+#include <ts/Scalar.h>
+#include <tsconfig/IntrusivePtr.h>
 
 /// Apache Traffic Server commons.
 namespace ts
@@ -43,13 +46,16 @@ namespace ts
 class MemArena
 {
   using self_type = MemArena; ///< Self reference type.
-public:
+protected:
+  struct Block;
+  using BlockPtr = ts::IntrusivePtr<Block>;
+  friend struct IntrusivePtrPolicy<Block>;
   /** Simple internal arena block of memory. Maintains the underlying memory.
    */
-  struct Block {
-    size_t size;                 ///< Actual block size.
-    size_t allocated{0};         ///< Current allocated (in use) bytes.
-    std::shared_ptr<Block> next; ///< Previously allocated block list.
+  struct Block : public ts::IntrusivePtrCounter {
+    size_t size;         ///< Actual block size.
+    size_t allocated{0}; ///< Current allocated (in use) bytes.
+    BlockPtr next;       ///< Previously allocated block list.
 
     /** Construct to have @a n bytes of available storage.
      *
@@ -66,14 +72,31 @@ public:
     size_t remaining() const;
     /// Span of unallocated storage.
     MemSpan remnant();
+    /** Allocate @a n bytes from this block.
+     *
+     * @param n Number of bytes to allocate.
+     * @return The span of memory allocated.
+     */
+    MemSpan alloc(size_t n);
+
     /** Check if the byte at address @a ptr is in this block.
      *
      * @param ptr Address of byte to check.
      * @return @c true if @a ptr is in this block, @c false otherwise.
      */
     bool contains(const void *ptr) const;
+
+    /** Override standard delete.
+     *
+     * This is required because the allocated memory size is larger than the class size which requires
+     * passing different parameters to de-allocate the memory.
+     *
+     * @param ptr Memory to be de-allocated.
+     */
+    static void operator delete(void *ptr);
   };
 
+public:
   /** Default constructor.
    * Construct with no memory.
    */
@@ -95,11 +118,11 @@ public:
   MemSpan alloc(size_t n);
 
   /** Adjust future block allocation size.
-   * This does not cause allocation, but instead makes a note of the size @a n and when a new block
-   * is needed, it will be at least @a n bytes. This is most useful for default constructed instances
-   * where the initial allocation should be delayed until use.
-   * @param n Minimum size of next allocated block.
-   * @return @a this
+      This does not cause allocation, but instead makes a note of the size @a n and when a new block
+      is needed, it will be at least @a n bytes. This is most useful for default constructed instances
+      where the initial allocation should be delayed until use.
+      @param n Minimum size of next allocated block.
+      @return @a this
    */
   self_type &reserve(size_t n);
 
@@ -159,16 +182,22 @@ public:
    */
   size_t extent() const;
 
-private:
-  /// creates a new @Block of size @n and places it within the @allocations list.
-  /// @return a pointer to the block to allocate from.
-  Block *newInternalBlock(size_t n, bool custom);
+protected:
+  /// creates a new @c Block with at least @n free space.
 
-  static constexpr size_t DEFAULT_BLOCK_SIZE = 1 << 15; ///< 32kb
-  static constexpr size_t DEFAULT_PAGE_SIZE  = 1 << 12; ///< 4kb
-  static constexpr size_t ALLOC_HEADER_SIZE  = 16;      ///< Guess of overhead of @c malloc
-  /// Never allocate less than this.
-  static constexpr size_t ALLOC_MIN_SIZE = 2 * ALLOC_HEADER_SIZE;
+  /** Internally allocates a new block of memory of size @a n bytes.
+   *
+   * @param n Size of block to allocate.
+   * @return
+   */
+  BlockPtr make_block(size_t n);
+
+  using Page      = ts::Scalar<4096>; ///< Size for rounding block sizes.
+  using Paragraph = ts::Scalar<16>;   ///< Minimum unit of memory allocation.
+
+  static constexpr size_t ALLOC_HEADER_SIZE = 16; ///< Guess of overhead of @c malloc
+  /// Initial block size to allocate if not specified via API.
+  static constexpr size_t DEFAULT_BLOCK_SIZE = Page::SCALE - Paragraph{round_up(ALLOC_HEADER_SIZE + sizeof(Block))};
 
   size_t current_alloc = 0; ///< Total allocations in the active generation.
   /// Total allocations in the previous generation. This is only non-zero while the arena is frozen.
@@ -176,8 +205,8 @@ private:
 
   size_t next_block_size = DEFAULT_BLOCK_SIZE; ///< Next internal block size
 
-  std::shared_ptr<Block> prev    = nullptr; ///< Previous generation.
-  std::shared_ptr<Block> current = nullptr; ///< Head of allocations list. Allocate from this.
+  BlockPtr prev;    ///< Previous generation.
+  BlockPtr current; ///< Head of allocations list. Allocate from this.
 };
 
 // Implementation
@@ -209,6 +238,15 @@ MemArena::Block::remaining() const
   return size - allocated;
 }
 
+inline MemSpan
+MemArena::Block::alloc(size_t n)
+{
+  ink_assert(n <= this->remaining());
+  MemSpan zret = this->remnant().prefix(n);
+  allocated += n;
+  return zret;
+}
+
 inline MemArena::MemArena() {}
 
 inline MemSpan
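
The reserve() and freeze() documentation above explains how the size of the next block is chosen. Here is a short sketch of that sizing behavior, with bounds patterned after the unit test expectations further down (the function name sketch_sizing() is illustrative only):

#include <ts/MemArena.h>
#include <ts/ink_assert.h>

void
sketch_sizing()
{
  ts::MemArena arena;            // default constructed: no block allocated yet
  arena.reserve(32000);          // remember the size; still no allocation
  arena.alloc(100);              // first block is created with at least 32000 bytes
  ink_assert(arena.extent() >= 32000);

  arena.freeze(64000);           // freeze and request >= 64000 bytes for the next block
  arena.thaw();                  // frozen blocks released; extent() drops to 0
  arena.alloc(1);                // triggers the new block honoring the freeze() hint
  ink_assert(arena.extent() >= 64000);
}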
diff --git a/lib/ts/unit-tests/test_IntrusivePtr.cc b/lib/ts/unit-tests/test_IntrusivePtr.cc
index 79c9350..c2fcf7e 100644
--- a/lib/ts/unit-tests/test_IntrusivePtr.cc
+++ b/lib/ts/unit-tests/test_IntrusivePtr.cc
@@ -43,11 +43,19 @@ struct Obscure : private ts::IntrusivePtrCounter {
   friend class ts::IntrusivePtr<Obscure>;
 };
 
+struct Item : public ts::IntrusivePtrCounter {
+  ts::IntrusivePtr<Item> _next;
+  Item() { ++_count; }
+  ~Item() { --_count; }
+  static int _count; // instance count.
+};
+
 struct Atomic : ts::IntrusivePtrAtomicCounter {
   int _q{0};
 };
 
 int Thing::_count{0};
+int Item::_count{0};
 
 TEST_CASE("IntrusivePtr", "[libts][IntrusivePtr]")
 {
@@ -85,6 +93,84 @@ TEST_CASE("IntrusivePtr", "[libts][IntrusivePtr]")
   op->_text.assign("Text");
 }
 
+// List test.
+TEST_CASE("IntrusivePtr List", "[libts][IntrusivePtr]")
+{
+  // The clang analyzer claims this type of list manipulation leads to use after free because of
+  // premature class destruction but these tests verify that is a false positive.
+
+  using LP = ts::IntrusivePtr<Item>;
+
+  LP list{new Item}; // start a list
+  {
+    // Add an item to the front of the list.
+    LP item{new Item};
+
+    REQUIRE(Item::_count == 2);
+    REQUIRE(list.use_count() == 1);
+    REQUIRE(item.use_count() == 1);
+    item->_next = list;
+    REQUIRE(Item::_count == 2);
+    REQUIRE(list.use_count() == 2);
+    REQUIRE(item.use_count() == 1);
+    list = item;
+    REQUIRE(Item::_count == 2);
+    REQUIRE(list.use_count() == 2);
+  }
+  REQUIRE(Item::_count == 2);
+  REQUIRE(list.use_count() == 1);
+  REQUIRE(list->_next.use_count() == 1);
+
+  {
+    // add an item after the first element in a non-empty list.
+    LP item{new Item};
+
+    REQUIRE(Item::_count == 3);
+    REQUIRE(list.use_count() == 1);
+    REQUIRE(item.use_count() == 1);
+    item->_next = list->_next;
+    REQUIRE(Item::_count == 3);
+    REQUIRE(list.use_count() == 1);
+    REQUIRE(item.use_count() == 1);
+    REQUIRE(item->_next.use_count() == 2);
+    list->_next = item;
+    REQUIRE(Item::_count == 3);
+    REQUIRE(item.use_count() == 2);
+    REQUIRE(list->_next.get() == item.get());
+    REQUIRE(item->_next.use_count() == 1);
+  }
+  REQUIRE(Item::_count == 3);
+  REQUIRE(list.use_count() == 1);
+  REQUIRE(list->_next.use_count() == 1);
+  list.reset();
+  REQUIRE(Item::_count == 0);
+
+  list.reset(new Item); // start a list
+  REQUIRE(Item::_count == 1);
+  {
+    // Add item after first in singleton list.
+    LP item{new Item};
+    REQUIRE(Item::_count == 2);
+    REQUIRE(list.use_count() == 1);
+    REQUIRE(item.use_count() == 1);
+    REQUIRE(!list->_next);
+    item->_next = list->_next;
+    REQUIRE(!item->_next);
+    REQUIRE(Item::_count == 2);
+    REQUIRE(list.use_count() == 1);
+    REQUIRE(item.use_count() == 1);
+    list->_next = item;
+    REQUIRE(Item::_count == 2);
+    REQUIRE(item.use_count() == 2);
+    REQUIRE(list->_next.get() == item.get());
+  }
+  REQUIRE(Item::_count == 2);
+  REQUIRE(list.use_count() == 1);
+  REQUIRE(list->_next.use_count() == 1);
+  list.reset();
+  REQUIRE(Item::_count == 0);
+}
+
 // Cross type tests.
 TEST_CASE("IntrusivePtr CrossType", "[libts][IntrusivePtr]")
 {
diff --git a/lib/ts/unit-tests/test_MemArena.cc b/lib/ts/unit-tests/test_MemArena.cc
index a99c202..6e358b4 100644
--- a/lib/ts/unit-tests/test_MemArena.cc
+++ b/lib/ts/unit-tests/test_MemArena.cc
@@ -24,93 +24,84 @@
 #include <catch.hpp>
 
 #include <ts/MemArena.h>
+using ts::MemSpan;
+using ts::MemArena;
 
 TEST_CASE("MemArena generic", "[libts][MemArena]")
 {
   ts::MemArena arena{64};
   REQUIRE(arena.size() == 0);
-  ts::MemSpan span1 = arena.alloc(32);
-  ts::MemSpan span2 = arena.alloc(32);
+  REQUIRE(arena.extent() >= 64);
 
+  ts::MemSpan span1 = arena.alloc(32);
   REQUIRE(span1.size() == 32);
-  REQUIRE(span2.size() == 32);
-  REQUIRE(span1.data() != span2.data());
 
-  arena.freeze();
-  REQUIRE(arena.size() == 0);
-  REQUIRE(arena.allocated_size() == 64);
+  ts::MemSpan span2 = arena.alloc(32);
+  REQUIRE(span2.size() == 32);
 
-  span1 = arena.alloc(64);
-  REQUIRE(span1.size() == 64);
-  REQUIRE(arena.size() == 64);
-  arena.thaw();
+  REQUIRE(span1.data() != span2.data());
   REQUIRE(arena.size() == 64);
-  REQUIRE(arena.allocated_size() == 64);
 
-  arena.freeze();
+  auto extent{arena.extent()};
   span1 = arena.alloc(128);
-  REQUIRE(span1.size() == 128);
-  REQUIRE(arena.size() == 128);
-  REQUIRE(arena.allocated_size() == 192);
-  REQUIRE(arena.remaining() == 0);
-  REQUIRE(arena.contains((char *)span1.data()));
-
-  arena.thaw();
-  REQUIRE(arena.size() == 128);
-  REQUIRE(arena.remaining() == 0);
+  REQUIRE(extent < arena.extent());
 }
 
 TEST_CASE("MemArena freeze and thaw", "[libts][MemArena]")
 {
-  ts::MemArena arena{64};
-  arena.freeze();
-  REQUIRE(arena.size() == 0);
-  arena.alloc(64);
-  REQUIRE(arena.size() == 64);
-  arena.thaw();
-  REQUIRE(arena.size() == 64);
-  arena.freeze();
-  arena.thaw();
-  REQUIRE(arena.size() == 0);
-  REQUIRE(arena.remaining() == 0);
-
-  arena.alloc(1024);
+  MemArena arena;
+  MemSpan span1{arena.alloc(1024)};
+  REQUIRE(span1.size() == 1024);
   REQUIRE(arena.size() == 1024);
+
   arena.freeze();
+
   REQUIRE(arena.size() == 0);
   REQUIRE(arena.allocated_size() == 1024);
   REQUIRE(arena.extent() >= 1024);
+
   arena.thaw();
   REQUIRE(arena.size() == 0);
   REQUIRE(arena.extent() == 0);
 
-  arena.freeze(64); // scale down
-  arena.alloc(64);
-  REQUIRE(arena.size() == 64);
-  REQUIRE(arena.remaining() == 0);
+  arena.reserve(2000);
+  arena.alloc(512);
+  arena.alloc(1024);
+  REQUIRE(arena.extent() >= 1536);
+  REQUIRE(arena.extent() < 3000);
+  auto extent = arena.extent();
+
+  arena.freeze();
+  arena.alloc(512);
+  REQUIRE(arena.extent() > extent); // new extent should be bigger.
+  arena.thaw();
+  REQUIRE(arena.size() == 512);
+  REQUIRE(arena.extent() > 1536);
 
   arena.clear();
   REQUIRE(arena.size() == 0);
-  REQUIRE(arena.remaining() == 0);
-  REQUIRE(arena.allocated_size() == 0);
+  REQUIRE(arena.extent() == 0);
+
+  arena.alloc(512);
+  arena.alloc(768);
+  arena.freeze(32000);
+  arena.thaw();
+  arena.alloc(1);
+  REQUIRE(arena.extent() >= 32000);
 }
 
 TEST_CASE("MemArena helper", "[libts][MemArena]")
 {
   ts::MemArena arena{256};
   REQUIRE(arena.size() == 0);
-  REQUIRE(arena.remaining() == 256);
   ts::MemSpan s = arena.alloc(56);
   REQUIRE(arena.size() == 56);
-  REQUIRE(arena.remaining() == 200);
   void *ptr = s.begin();
 
   REQUIRE(arena.contains((char *)ptr));
   REQUIRE(arena.contains((char *)ptr + 100)); // even though span isn't this large, this pointer should still be in arena
   REQUIRE(!arena.contains((char *)ptr + 300));
   REQUIRE(!arena.contains((char *)ptr - 1));
-  REQUIRE(arena.contains((char *)ptr + 255));
-  REQUIRE(!arena.contains((char *)ptr + 256));
 
   arena.freeze(128);
   REQUIRE(arena.contains((char *)ptr));
@@ -124,9 +115,6 @@ TEST_CASE("MemArena helper", "[libts][MemArena]")
   arena.thaw();
   REQUIRE(!arena.contains((char *)ptr));
   REQUIRE(arena.contains((char *)ptr2));
-
-  REQUIRE(arena.remaining() == 128 - 10);
-  REQUIRE(arena.allocated_size() == 10);
 }
 
 TEST_CASE("MemArena large alloc", "[libts][MemArena]")
@@ -164,7 +152,6 @@ TEST_CASE("MemArena block allocation", "[libts][MemArena]")
   ts::MemSpan s3 = arena.alloc(16);
 
   REQUIRE(s.size() == 32);
-  REQUIRE(arena.remaining() == 0);
   REQUIRE(arena.allocated_size() == 64);
 
   REQUIRE(arena.contains((char *)s.begin()));
@@ -187,10 +174,20 @@ TEST_CASE("MemArena full blocks", "[libts][MemArena]")
   size_t init_size = 32000;
 
   arena.reserve(init_size);
-  arena.alloc(init_size - 64);
-  arena.alloc(32000); // should in its own box - exactly sized.
-  arena.alloc(64000); // same here.
+  MemSpan m1{arena.alloc(init_size - 64)};
+  MemSpan m2{arena.alloc(32000)};
+  MemSpan m3{arena.alloc(64000)};
 
   REQUIRE(arena.remaining() >= 64);
   REQUIRE(arena.extent() > 32000 + 64000 + init_size);
+  REQUIRE(arena.extent() < 2 * (32000 + 64000 + init_size));
+
+  // Let's see if that memory is really there.
+  memset(m1.data(), 0xa5, m1.size());
+  memset(m2.data(), 0xc2, m2.size());
+  memset(m3.data(), 0x56, m3.size());
+
+  REQUIRE(std::all_of(m1.begin(), m1.end(), [](uint8_t c) { return 0xa5 == c; }));
+  REQUIRE(std::all_of(m2.begin(), m2.end(), [](uint8_t c) { return 0xc2 == c; }));
+  REQUIRE(std::all_of(m3.begin(), m3.end(), [](uint8_t c) { return 0x56 == c; }));
 }
diff --git a/lib/tsconfig/IntrusivePtr.h b/lib/tsconfig/IntrusivePtr.h
index 93370f3..cb09d0a 100644
--- a/lib/tsconfig/IntrusivePtr.h
+++ b/lib/tsconfig/IntrusivePtr.h
@@ -102,13 +102,15 @@ class IntrusivePtrAtomicCounter
 {
   template < typename T > friend class IntrusivePtr;
 
-  using self_type = IntrusivePtrAtomicCounter;
+  using self_type = IntrusivePtrAtomicCounter; ///< Self reference type.
 
 public:
+  /// Copy constructor - do not copy reference count.
   IntrusivePtrAtomicCounter(self_type const & that);
   /// No move constructor - that won't work for an object that is the target of a shared pointer.
   IntrusivePtrAtomicCounter(self_type && that) = delete;
 
+  /// Self assignment - do not copy reference count.
   self_type &operator=(self_type const &that);
   /// No move assignment - that won't work for an object that is the target of a shared pointer.
   self_type &operator=(self_type &&that) = delete;
@@ -130,6 +132,13 @@ protected:
     This is a reference counted smart pointer. A single object is jointly ownded by a set of
     pointers. When the last of the pointers is destructed the target object is also destructed.
 
+    To use @c IntrusivePtr on type @a T, @a T must inherit from a reference counter class,
+    such as @c IntrusivePtrCounter or @c IntrusivePtrAtomicCounter. If this inheritance is @c public
+    then no other support is required. If the inheritance is not @c public then @a T must declare
+    @c IntrusivePtr<T> as a @c friend class.
+
+    If it is not possible to have @a T inherit from a reference counter class, use @c std::shared_ptr.
+
     The smart pointer actions can be changed through class specific policy by specializing the @c
     IntrusivePtrPolicy template class.
 */
@@ -137,31 +146,34 @@ template <typename T> class IntrusivePtr
 {
 private:                          /* don't pollute client with these typedefs */
   using self_type = IntrusivePtr;      ///< Self reference type.
-  template < typename U > friend class IntrusivePtr; /// Make friends wih our clones.
+  template < typename U > friend class IntrusivePtr; // Make friends with siblings for cross type casts.
 public:
-  /// The underlying (integral) type of the reference counter.
-  using count_type = long int;
+  /// The externally used type for the reference count.
+  using counter_type = long int;
 
-  /// Default constructor (nullptr initialized).
+  /// Default constructor (@c nullptr initialized).
   IntrusivePtr();
   /// Construct from instance.
   /// The instance becomes referenced and owned by the pointer.
   explicit IntrusivePtr(T *obj);
-  /// Destructor.
+  /// Destructor. If last referencing pointer, the contained instance is destroyed.
   ~IntrusivePtr();
 
-  /// Copy constructor
+  /// Copy constructor. A new reference to the object is created.
   IntrusivePtr(self_type const& that);
-  /// Move constructor.
+  /// Move constructor. The reference in @a that is moved to @a this.
   IntrusivePtr(self_type && that);
-  /// Self assignment.
+  /// Self assignment. A new reference to the object is created.
   self_type& operator=(self_type const& that);
-  /// Move assignment.
+  /// Move assignment. The reference in @a that is moved to @a this.
   self_type& operator=(self_type && that);
 
   /** Assign from instance.
-      The instance becomes referenced and owned by the pointer.
-      The reference to the current object is dropped.
+
+      The instance becomes referenced and owned by the pointer. The reference to the current object
+      is dropped.
+
+      @internal Direct assignment of a raw pointer was supported but I think that is too dangerous.
   */
   void reset(T *obj = nullptr);
 
@@ -172,6 +184,9 @@ public:
      vitiates the point of this class, but it is occasionally the right thing to do. Use with
       caution.
 
+      @internal Unfortunately this is currently used and can't be removed at this time. Hopefully some
+      refactoring elsewhere will make it obsolete.
+
       @return @c true if there are no references upon return,
       @c false if the reference count is not zero.
    */
@@ -215,7 +230,7 @@ public:
 
   /// Reference count.
   /// @return Number of references.
-  count_type use_count() const;
+  counter_type use_count() const;
 
 private:
   T *m_obj{nullptr}; ///< Pointer to object.
@@ -227,9 +242,14 @@ private:
 };
 
 /** Pointer dynamic cast.
-    This allows a smart pointer to be cast from one type to another.
-    It must be used when the types do not implicitly convert (generally
-    a downcast).
+
+    This allows a smart pointer to be cast from one type to another.  It must be used when the types
+    do not implicitly convert (generally a downcast).
+
+    @tparam T Type of the resulting intrusive pointer. Must be explicit.
+    @tparam U Type of the intrusive pointer @a src - can be deduced.
+    @param src The original intrusive pointer.
+    @return An intrusive pointer to @a T pointing at the same object as @a src.
 
     @code
     class A { ... };
@@ -241,18 +261,23 @@ private:
 */
 template <typename T, typename U>
 IntrusivePtr<T>
-dynamic_ptr_cast(IntrusivePtr<U> const &src ///< Source pointer.
-                 )
+dynamic_ptr_cast(IntrusivePtr<U> const &src)
 {
   return IntrusivePtr<T>(dynamic_cast<T *>(src.get()));
 }
 
 /** Pointer cast.
+
     This allows a smart pointer to be cast from one type to another.
     It must be used when the types do not implicitly convert (generally
     a downcast). This uses @c static_cast and so performs only compile
     time checks.
 
+    @tparam T Type of the resulting intrusive pointer. Must be explicit.
+    @tparam U Type of the intrusive pointer @a src - can be deduced.
+    @param src The original intrusive pointer.
+    @return An intrusive pointer to @a T pointing at the same object as @a src.
+
     @code
     class A { ... };
     class B : public A { ... };
@@ -261,12 +286,9 @@ dynamic_ptr_cast(IntrusivePtr<U> const &src ///< Source pointer.
     the_b = ptr_cast<B>(really_b);
     @endcode
 */
-template <typename T, ///< Target type.
-          typename U  ///< Source type.
-          >
+template <typename T, typename U>
 IntrusivePtr<T>
-ptr_cast(IntrusivePtr<U> const &src ///< Source pointer.
-         )
+ptr_cast(IntrusivePtr<U> const &src)
 {
   return IntrusivePtr<T>(static_cast<T *>(src.get()));
 }
@@ -274,13 +296,12 @@ ptr_cast(IntrusivePtr<U> const &src ///< Source pointer.
 /* ----------------------------------------------------------------------- */
 /** Default policy class for intrusive pointers.
 
-    This allows per type policy, although not per target instance.
-    Clients can override policy by specializing this class for the
-    target type.
+    This allows per type policy, although not per target instance. Clients can override policy by
+    specializing @c IntrusivePtrPolicy and inheriting from this class to provide default actions.
 
     @code
     template <> IntrusivePtrPolicy<SomeType>
-      : IntrusivePtrDefaultPolicy {
+      : IntrusivePtrDefaultPolicy<SomeType> {
      ... Redefinition of methods and nested types ...
     };
     @endcode
@@ -291,30 +312,27 @@ ptr_cast(IntrusivePtr<U> const &src ///< Source pointer.
     anyway.
 */
 
-template <typename T> class IntrusivePtrPolicy
+template <typename T> class IntrusivePtrDefaultPolicy
 {
 public:
   /// Called when the pointer is dereferenced.
   /// Default is empty (no action).
-  static void dereferenceCheck(T * ///< Target object.
-                               );
+  static void dereferenceCheck(T *);
 
   /** Perform clean up on a target object that is no longer referenced.
 
-      Default is calling @c delete. Any specialization that overrides this
-      @b must clean up the object. The primary use of this is to perform
-      a clean up other than @c delete.
+      @param t Instance to finalize.
 
-      @note When this is called, the target object reference count
-      is zero. If it is necessary to pass a smart pointer to the
-      target object, it will be necessary to call
-      @c IntrusivePtr::release to drop the reference without
-      another finalization. Further care must be taken that none of
-      the called logic keeps a copy of the smart pointer. Use with
-      caution.
+      Default is calling @c delete. Any specialization that overrides this @b must clean up the
+      object. The primary use of this is to perform a clean up other than @c delete.
+
+      @note When this is called, the target object reference count is zero. If it is necessary to
+      pass a smart pointer to the target object, it will be necessary to call @c
+      IntrusivePtr::release to drop the reference without another finalization. Further care must be
+      taken that none of the called logic keeps a copy of the smart pointer. Use with caution.
   */
-  static void finalize(T *t ///< Target object.
-                       );
+  static void finalize(T *t);
+
   /// Strict weak order for STL containers.
   class Order : public std::binary_function<IntrusivePtr<T>, IntrusivePtr<T>, bool>
   {
@@ -322,15 +340,12 @@ public:
     /// Default constructor.
     Order() {}
     /// Compare by raw pointer.
-    bool operator()(IntrusivePtr<T> const &lhs, ///< Left hand operand.
-                    IntrusivePtr<T> const &rhs  ///< Right hand operand.
-                    ) const;
+    bool operator()(IntrusivePtr<T> const &lhs, IntrusivePtr<T> const &rhs) const;
   };
 };
 
-struct IntrusivePtrDefaultPolicyTag {
-};
-typedef IntrusivePtrPolicy<IntrusivePtrDefaultPolicyTag> IntrusivePtrDefaultPolicy;
+// If no specialization is provided then use the default;
+template < typename T > struct IntrusivePtrPolicy : public IntrusivePtrDefaultPolicy<T> {};
 /* ----------------------------------------------------------------------- */
 /* ----------------------------------------------------------------------- */
 /* Inline Methods */
@@ -359,20 +374,20 @@ IntrusivePtrAtomicCounter::operator=(self_type const &)
 
 template <typename T>
 void
-IntrusivePtrPolicy<T>::dereferenceCheck(T *)
+IntrusivePtrDefaultPolicy<T>::dereferenceCheck(T *)
 {
 }
 
 template <typename T>
 void
-IntrusivePtrPolicy<T>::finalize(T *obj)
+IntrusivePtrDefaultPolicy<T>::finalize(T *obj)
 {
   delete obj;
 }
 
 template <typename T>
 bool
-IntrusivePtrPolicy<T>::Order::operator()(IntrusivePtr<T> const &lhs, IntrusivePtr<T> const &rhs) const
+IntrusivePtrDefaultPolicy<T>::Order::operator()(IntrusivePtr<T> const &lhs, IntrusivePtr<T> const &rhs) const
 {
   return lhs.get() < rhs.get();
 }
@@ -580,10 +595,10 @@ template <typename T> IntrusivePtr<T>::operator T *() const
 }
 
 template <typename T>
-typename IntrusivePtr<T>::count_type
+typename IntrusivePtr<T>::counter_type
 IntrusivePtr<T>::use_count() const
 {
-  return m_obj ? count_type{m_obj->m_intrusive_pointer_reference_count} : count_type{0};
+  return m_obj ? counter_type{m_obj->m_intrusive_pointer_reference_count} : counter_type{0};
 }
 /* ----------------------------------------------------------------------- */
 /* ----------------------------------------------------------------------- */
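
For readers unfamiliar with the intrusive pointer that MemArena now uses for its block list, here is a minimal sketch of the pattern exercised by the new test_IntrusivePtr.cc list cases (the Node type and sketch_list() function are illustrative only):

#include <tsconfig/IntrusivePtr.h>

// Public inheritance from the counter is all a type needs to be managed by IntrusivePtr.
struct Node : public ts::IntrusivePtrCounter {
  ts::IntrusivePtr<Node> next; // intrusive singly linked list, as in MemArena::Block
};

void
sketch_list()
{
  ts::IntrusivePtr<Node> head{new Node}; // head owns the first node
  {
    ts::IntrusivePtr<Node> item{new Node};
    item->next = head; // the first node is now referenced by head and item->next
    head       = item; // push item to the front of the list
  }                    // item goes out of scope; both nodes survive through head
  head.reset();        // dropping the last reference destroys the whole list
}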

-- 
To stop receiving notification emails like this one, please contact
bcall@apache.org.
