trafficserver-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From a..@apache.org
Subject [3/8] trafficserver git commit: TS-974: Partial Object Caching.
Date Mon, 29 Jun 2015 11:47:52 GMT
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/StatSystem.cc
----------------------------------------------------------------------
diff --git a/proxy/StatSystem.cc b/proxy/StatSystem.cc
index 391cdd8..fa4d102 100644
--- a/proxy/StatSystem.cc
+++ b/proxy/StatSystem.cc
@@ -40,7 +40,6 @@
 
 #define SNAP_USAGE_PERIOD HRTIME_SECONDS(2)
 
-
 // variables
 
 #ifdef DEBUG
@@ -65,7 +64,6 @@ int snap_stats_every = 60;
 ink_hrtime http_handler_times[MAX_HTTP_HANDLER_EVENTS];
 int http_handler_counts[MAX_HTTP_HANDLER_EVENTS];
 
-
 char snap_filename[PATH_NAME_MAX] = DEFAULT_SNAP_FILENAME;
 
 #define DEFAULT_PERSISTENT
@@ -117,7 +115,6 @@ static int non_persistent_stats[] = {
 #undef _FOOTER
 #undef _D
 
-
 // functions
 
 static int
@@ -372,7 +369,6 @@ stat_callback(Continuation *cont, HTTPHdr *header)
     snprintf(result, result_size - 7, "<pre>\n%s", buffer);
   }
 
-
   if (!empty) {
     StatPageData data;
 
@@ -425,7 +421,8 @@ initialize_all_global_stats()
 
   if (access(rundir, R_OK | W_OK) == -1) {
     Warning("Unable to access() local state directory '%s': %d, %s", (const char *)rundir, errno, strerror(errno));
-    Warning(" Please set 'proxy.config.local_state_dir' to allow statistics collection");
+    Warning(" Please set 'proxy.config.local_state_dir' to allow statistics "
+            "collection");
   }
   REC_ReadConfigString(snap_file, "proxy.config.stats.snap_file", PATH_NAME_MAX);
   Layout::relative_to(snap_filename, sizeof(snap_filename), (const char *)rundir, snap_file);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/TestClusterHash.cc
----------------------------------------------------------------------
diff --git a/proxy/TestClusterHash.cc b/proxy/TestClusterHash.cc
index 73ab208..3663a4d 100644
--- a/proxy/TestClusterHash.cc
+++ b/proxy/TestClusterHash.cc
@@ -28,7 +28,6 @@
 #include "Cluster.h"
 #include "libts.h"
 
-
 //
 // This test function produces the table included
 // in Memo.ClusterHash

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/TestPreProc.cc
----------------------------------------------------------------------
diff --git a/proxy/TestPreProc.cc b/proxy/TestPreProc.cc
index 68fef0d..562721c 100644
--- a/proxy/TestPreProc.cc
+++ b/proxy/TestPreProc.cc
@@ -70,7 +70,6 @@ RequestInput::run()
   char *buff = m_cb->getWrite(&maxBytes);
   unsigned writeBytes = (m_len < maxBytes) ? m_len : maxBytes;
 
-
   writeBytes = ink_strlcpy(buff, m_sp, maxBytes);
   m_cb->wrote(writeBytes);
 
@@ -177,6 +176,5 @@ main()
     cout << "Elapsed time for " << lc << "loops is " << elapsedTime << endl;
   }
 
-
   return (0);
 }

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/TestProxy.cc
----------------------------------------------------------------------
diff --git a/proxy/TestProxy.cc b/proxy/TestProxy.cc
index 5d807a0..fcc061f 100644
--- a/proxy/TestProxy.cc
+++ b/proxy/TestProxy.cc
@@ -30,7 +30,6 @@
 #include "OneWayMultiTunnel.h"
 #include "Cache.h"
 
-
 struct TestProxy : Continuation {
   VConnection *vc;
   VConnection *vconnection_vector[2];

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/TestSimpleProxy.cc
----------------------------------------------------------------------
diff --git a/proxy/TestSimpleProxy.cc b/proxy/TestSimpleProxy.cc
index 6cc70f1..412df16 100644
--- a/proxy/TestSimpleProxy.cc
+++ b/proxy/TestSimpleProxy.cc
@@ -137,7 +137,6 @@ struct TestProxy : Continuation {
   }
 };
 
-
 struct TestAccept : Continuation {
   int
   startEvent(int event, NetVConnection *e)

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/TimeTrace.h
----------------------------------------------------------------------
diff --git a/proxy/TimeTrace.h b/proxy/TimeTrace.h
index 05c1a01..e6445c2 100644
--- a/proxy/TimeTrace.h
+++ b/proxy/TimeTrace.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 /****************************************************************************
 
   TimeTrace.h

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/Transform.h
----------------------------------------------------------------------
diff --git a/proxy/Transform.h b/proxy/Transform.h
index 8585fa0..af0dca3 100644
--- a/proxy/Transform.h
+++ b/proxy/Transform.h
@@ -108,5 +108,4 @@ num_chars_for_int(int64_t i)
 
 extern TransformProcessor transformProcessor;
 
-
 #endif /* __TRANSFORM_H__ */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/UDPAPIClientTest.cc
----------------------------------------------------------------------
diff --git a/proxy/UDPAPIClientTest.cc b/proxy/UDPAPIClientTest.cc
index 88e74a2..ff41f21 100644
--- a/proxy/UDPAPIClientTest.cc
+++ b/proxy/UDPAPIClientTest.cc
@@ -28,7 +28,6 @@
 #include <string.h>
 #include <arpa/inet.h>
 
-
 char sendBuff[] = "I'm Alive.";
 
 FILE *fp;
@@ -90,7 +89,6 @@ UDPClient_handle_callbacks(TSCont cont, TSEvent event, void *e)
         for (int i = 0; i < avail; i++)
           fprintf(fp, "%c", *(buf + i));
 
-
         memcpy((char *)&recvBuff + total_len, buf, avail);
         TSIOBufferReaderConsume(reader, avail);
         total_len += avail;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/api/ts/InkAPIPrivateIOCore.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/InkAPIPrivateIOCore.h b/proxy/api/ts/InkAPIPrivateIOCore.h
index 9b7371a..2eea5ee 100644
--- a/proxy/api/ts/InkAPIPrivateIOCore.h
+++ b/proxy/api/ts/InkAPIPrivateIOCore.h
@@ -128,7 +128,6 @@ TSReturnCode sdk_sanity_check_iocore_structure(void *);
 tsapi TSMutex TSMutexCreateInternal(void);
 tsapi int TSMutexCheck(TSMutex mutex);
 
-
 /* IOBuffer */
 tsapi void TSIOBufferReaderCopy(TSIOBufferReader readerp, const void *buf, int64_t length);
 tsapi int64_t TSIOBufferBlockDataSizeGet(TSIOBufferBlock blockp);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/api/ts/remap.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/remap.h b/proxy/api/ts/remap.h
index c6f3cab..34705cb 100644
--- a/proxy/api/ts/remap.h
+++ b/proxy/api/ts/remap.h
@@ -38,22 +38,26 @@ extern "C" {
 
 typedef struct _tsremap_api_info {
   unsigned long size;            /* in: sizeof(struct _tsremap_api_info) */
-  unsigned long tsremap_version; /* in: TS supported version ((major << 16) | minor) */
+  unsigned long tsremap_version; /* in: TS supported version ((major << 16) |
+                                    minor) */
 } TSRemapInterface;
 
-
 typedef struct _tm_remap_request_info {
-  /* Important: You should *not* release these buf pointers or TSMLocs from your plugin! */
+  /* Important: You should *not* release these buf pointers or TSMLocs from your
+   * plugin! */
 
   /* these URL mloc's are read only, use normal ts/ts.h APIs for accesing  */
   TSMLoc mapFromUrl;
   TSMLoc mapToUrl;
 
-  /* the request URL mloc and buffer pointers are read-write. You can read and modify the
-   requestUrl using normal ts/ts.h APIs, which is how you change the destination URL. */
+  /* the request URL mloc and buffer pointers are read-write. You can read and
+   modify the
+   requestUrl using normal ts/ts.h APIs, which is how you change the destination
+   URL. */
   TSMLoc requestUrl;
 
-  /* requestBufp and requestHdrp are the equivalent of calling TSHttpTxnClientReqGet(). */
+  /* requestBufp and requestHdrp are the equivalent of calling
+   * TSHttpTxnClientReqGet(). */
   TSMBuffer requestBufp;
   TSMLoc requestHdrp;
 
@@ -61,7 +65,6 @@ typedef struct _tm_remap_request_info {
   int redirect;
 } TSRemapRequestInfo;
 
-
 /* This is the type returned by the TSRemapDoRemap() callback */
 typedef enum {
   TSREMAP_NO_REMAP = 0,       /* No remaping was done, continue with next in chain */
@@ -74,13 +77,14 @@ typedef enum {
      -500 to -599
      ....
      This would allow a plugin to generate an error page. Right now,
-     setting the return code to any negative number is equivalent to TSREMAP_NO_REMAP */
+     setting the return code to any negative number is equivalent to
+     TSREMAP_NO_REMAP */
   TSREMAP_ERROR = -1 /* Some error, that should generate an error page */
 } TSRemapStatus;
 
-
 /* ----------------------------------------------------------------------------------
-   These are the entry points a plugin can implement. Note that TSRemapInit() and
+   These are the entry points a plugin can implement. Note that TSRemapInit()
+   and
    TSRemapDoRemap() are both required.
    ----------------------------------------------------------------------------------
 */
@@ -92,33 +96,33 @@ typedef enum {
 */
 tsapi TSReturnCode TSRemapInit(TSRemapInterface *api_info, char *errbuf, int errbuf_size);
 
-
 /* Remap new request
    Mandatory interface function.
    Remap API plugin can/should use SDK API function calls inside this function!
    return: TSREMAP_NO_REMAP - No remaping was done, continue with next in chain
            TSREMAP_DID_REMAP - Remapping was done, continue with next in chain
-           TSREMAP_NO_REMAP_STOP - No remapping was done, and stop plugin chain evaluation
-           TSREMAP_DID_REMAP_STOP -  Remapping was done, but stop plugin chain evaluation
+           TSREMAP_NO_REMAP_STOP - No remapping was done, and stop plugin chain
+   evaluation
+           TSREMAP_DID_REMAP_STOP -  Remapping was done, but stop plugin chain
+   evaluation
 */
 tsapi TSRemapStatus TSRemapDoRemap(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri);
 
-
 /* Plugin shutdown, called when plugin is unloaded.
    Optional function. */
 tsapi void TSRemapDone(void);
 
-
-/* Plugin new instance. Create new plugin processing entry for unique remap record.
+/* Plugin new instance. Create new plugin processing entry for unique remap
+   record.
    First two arguments in argv vector are - fromURL and toURL from remap record.
-   Please keep in mind that fromURL and toURL will be converted to canonical view.
+   Please keep in mind that fromURL and toURL will be converted to canonical
+   view.
    Return: TS_SUCESS
            TS_ERROR - instance creation error
 */
 tsapi TSReturnCode TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_size);
 tsapi void TSRemapDeleteInstance(void *);
 
-
 /* Check response code from Origin Server
    os_response_type -> TSServerState
    Remap API plugin can use InkAPI function calls inside TSRemapDoRemap()

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/api/ts/ts.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/ts.h b/proxy/api/ts/ts.h
index 779f99c..6453c41 100644
--- a/proxy/api/ts/ts.h
+++ b/proxy/api/ts/ts.h
@@ -729,7 +729,6 @@ TSUrlPercentEncode(TSMBuffer bufp, TSMLoc offset, char *dst, size_t dst_size, si
 */
 tsapi TSReturnCode TSStringPercentDecode(const char *str, size_t str_len, char *dst, size_t dst_size, size_t *length);
 
-
 /* --------------------------------------------------------------------------
    MIME headers */
 
@@ -1006,7 +1005,8 @@ tsapi TSReturnCode TSMimeHdrFieldValueUintSet(TSMBuffer bufp, TSMLoc hdr, TSMLoc
 tsapi TSReturnCode TSMimeHdrFieldValueDateSet(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, time_t value);
 
 tsapi TSReturnCode TSMimeHdrFieldValueAppend(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, const char *value, int length);
-/* These Insert() APIs should be considered. Use the corresponding Set() API instead */
+/* These Insert() APIs should be considered. Use the corresponding Set() API
+ * instead */
 tsapi TSReturnCode
 TSMimeHdrFieldValueStringInsert(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, const char *value, int length);
 tsapi TSReturnCode TSMimeHdrFieldValueIntInsert(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, int value);
@@ -1338,7 +1338,8 @@ tsapi struct sockaddr const *TSHttpTxnServerAddrGet(TSHttpTxn txnp);
     This must be invoked before the origin server address is looked up.
     If called no lookup is done, the address @a addr is used instead.
 
-    @return @c TS_SUCCESS if the origin server address is set, @c TS_ERROR otherwise.
+    @return @c TS_SUCCESS if the origin server address is set, @c TS_ERROR
+   otherwise.
 */
 tsapi TSReturnCode TSHttpTxnServerAddrSet(TSHttpTxn txnp, struct sockaddr const *addr /**< Address for origin server. */
                                           );
@@ -1496,8 +1497,10 @@ tsapi void *TSHttpTxnArgGet(TSHttpTxn txnp, int arg_idx);
 tsapi void TSHttpSsnArgSet(TSHttpSsn ssnp, int arg_idx, void *arg);
 tsapi void *TSHttpSsnArgGet(TSHttpSsn ssnp, int arg_idx);
 
-/* The reserve API should only be use in TSAPI plugins, during plugin initialization! */
-/* The lookup methods can be used anytime, but are best used during initialization as well,
+/* The reserve API should only be use in TSAPI plugins, during plugin
+ * initialization! */
+/* The lookup methods can be used anytime, but are best used during
+   initialization as well,
    or at least "cache" the results for best performance. */
 tsapi TSReturnCode TSHttpArgIndexReserve(const char *name, const char *description, int *arg_idx);
 tsapi TSReturnCode TSHttpArgIndexNameLookup(const char *name, int *arg_idx, const char **description);
@@ -1534,7 +1537,8 @@ tsapi void TSHttpTxnDebugSet(TSHttpTxn txnp, int on);
 tsapi int TSHttpTxnDebugGet(TSHttpTxn txnp);
 /**
        Set the session specific debugging flag for this client session.
-       When turned on, internal debug messages related to this session and all transactions
+       When turned on, internal debug messages related to this session and all
+   transactions
        in the session will be written even if the debug tag isn't on.
 
     @param ssnp Client session to change.
@@ -1624,7 +1628,8 @@ tsapi void TSHttpTxnServerIntercept(TSCont contp, TSHttpTxn txnp);
     This returns a VConn that connected to the transaction.
 
     @param addr Target address of the origin server.
-    @param tag A logging tag that can be accessed via the pitag field. May be @c NULL.
+    @param tag A logging tag that can be accessed via the pitag field. May be @c
+   NULL.
     @param id A logging id that can be access via the piid field.
  */
 tsapi TSVConn TSHttpConnectWithPluginId(struct sockaddr const *addr, char const *tag, int64_t id);
@@ -1722,10 +1727,10 @@ tsapi struct sockaddr const *TSNetVConnRemoteAddrGet(TSVConn vc);
       or cancel the attempt to connect.
 
  */
-tsapi TSAction
-TSNetConnect(TSCont contp, /**< continuation that is called back when the attempted net connection either succeeds or fails. */
-             struct sockaddr const *to /**< Address to which to connect. */
-             );
+tsapi TSAction TSNetConnect(TSCont contp,             /**< continuation that is called back when the attempted net
+                                                         connection either succeeds or fails. */
+                            struct sockaddr const *to /**< Address to which to connect. */
+                            );
 
 tsapi TSAction TSNetAccept(TSCont contp, int port, int domain, int accept_threads);
 
@@ -1947,8 +1952,10 @@ tsapi int64_t TSIOBufferReaderAvail(TSIOBufferReader readerp);
 tsapi struct sockaddr const *TSNetVConnLocalAddrGet(TSVConn vc);
 
 /* --------------------------------------------------------------------------
-   Stats and configs based on librecords raw stats (this is preferred API until we
-   rewrite stats). This system has a limitation of up to 1,500 stats max, controlled
+   Stats and configs based on librecords raw stats (this is preferred API until
+   we
+   rewrite stats). This system has a limitation of up to 1,500 stats max,
+   controlled
    via proxy.config.stat_api.max_stats_allowed (default is 512).
 
    This is available as of Apache TS v2.2.*/
@@ -1997,7 +2004,8 @@ tsapi void TSDebug(const char *tag, const char *format_str, ...) TS_PRINTFLIKE(2
     Output a debug line even if the debug tag is turned off, as long as
     debugging is enabled. Could be used as follows:
     @code
-    TSDebugSpecifc(TSHttpTxnDebugGet(txn), "plugin_tag" , "Hello World from transaction %p", txn);
+    TSDebugSpecifc(TSHttpTxnDebugGet(txn), "plugin_tag" , "Hello World from
+   transaction %p", txn);
     @endcode
     will be printed if the plugin_tag is enabled or the transaction specific
     debugging is turned on for txn.
@@ -2157,14 +2165,16 @@ tsapi TSReturnCode TSTextLogObjectRollingEnabledSet(TSTextLogObject the_object,
 tsapi void TSTextLogObjectRollingIntervalSecSet(TSTextLogObject the_object, int rolling_interval_sec);
 
 /**
-    Set the rolling offset. rolling_offset_hr specifies the hour (between 0 and 23) when log rolling
+    Set the rolling offset. rolling_offset_hr specifies the hour (between 0 and
+   23) when log rolling
     should take place.
 
  */
 tsapi void TSTextLogObjectRollingOffsetHrSet(TSTextLogObject the_object, int rolling_offset_hr);
 
 /**
-    Set the rolling size. rolling_size_mb specifies the size in MB when log rolling
+    Set the rolling size. rolling_size_mb specifies the size in MB when log
+   rolling
     should take place.
 
  */
@@ -2240,7 +2250,6 @@ tsapi void TSVConnActiveTimeoutCancel(TSVConn connp);
 */
 tsapi void TSSkipRemappingSet(TSHttpTxn txnp, int flag);
 
-
 /*
   Set or get various overridable configurations, for a transaction. This should
   probably be done as early as possible, e.g. TS_HTTP_READ_REQUEST_HDR_HOOK.
@@ -2284,7 +2293,8 @@ tsapi void TSHttpTxnRedirectUrlSet(TSHttpTxn txnp, const char *url, const int ur
 tsapi TS_DEPRECATED void TSRedirectUrlSet(TSHttpTxn txnp, const char *url, const int url_len);
 
 /**
-   Return the current (if set) redirection URL string. This is still owned by the
+   Return the current (if set) redirection URL string. This is still owned by
+   the
    core, and must not be free'd.
 
    @param txnp the transaction pointer
@@ -2327,10 +2337,13 @@ tsapi int TSHttpTxnBackgroundFillStarted(TSHttpTxn txnp);
 tsapi TSReturnCode TSBase64Decode(const char *str, size_t str_len, unsigned char *dst, size_t dst_size, size_t *length);
 tsapi TSReturnCode TSBase64Encode(const char *str, size_t str_len, char *dst, size_t dst_size, size_t *length);
 
-/* Get milestone timers, useful for measuring where we are spending time in the transaction processing */
+/* Get milestone timers, useful for measuring where we are spending time in the
+ * transaction processing */
 /**
-   Return the particular milestone timer for the transaction. If 0 is returned, it means
-   the transaction has not yet reached that milestone. Asking for an "unknown" milestone is
+   Return the particular milestone timer for the transaction. If 0 is returned,
+   it means
+   the transaction has not yet reached that milestone. Asking for an "unknown"
+   milestone is
    an error.
 
    @param txnp the transaction pointer
@@ -2344,20 +2357,25 @@ tsapi TSReturnCode TSBase64Encode(const char *str, size_t str_len, char *dst, si
 tsapi TSReturnCode TSHttpTxnMilestoneGet(TSHttpTxn txnp, TSMilestonesType milestone, TSHRTime *time);
 
 /**
-  Test whether a request / response header pair would be cacheable under the current
-  configuration. This would typically be used in TS_HTTP_READ_RESPONSE_HDR_HOOK, when
+  Test whether a request / response header pair would be cacheable under the
+  current
+  configuration. This would typically be used in TS_HTTP_READ_RESPONSE_HDR_HOOK,
+  when
   you have both the client request and server response ready.
 
   @param txnp the transaction pointer
-  @param request the client request header. If NULL, use the transactions client request.
-  @param response the server response header. If NULL, use the transactions origin response.
+  @param request the client request header. If NULL, use the transactions client
+  request.
+  @param response the server response header. If NULL, use the transactions
+  origin response.
 
   @return 1 if the request / response is cacheable, 0 otherwise
 */
 tsapi int TSHttpTxnIsCacheable(TSHttpTxn txnp, TSMBuffer request, TSMBuffer response);
 
 /**
-   Return a string respresentation for a TSServerState value. This is useful for plugin debugging.
+   Return a string respresentation for a TSServerState value. This is useful for
+   plugin debugging.
 
    @param state the value of this TSServerState
 
@@ -2366,7 +2384,8 @@ tsapi int TSHttpTxnIsCacheable(TSHttpTxn txnp, TSMBuffer request, TSMBuffer resp
 tsapi const char *TSHttpServerStateNameLookup(TSServerState state);
 
 /**
-   Return a string respresentation for a TSHttpHookID value. This is useful for plugin debugging.
+   Return a string respresentation for a TSHttpHookID value. This is useful for
+   plugin debugging.
 
    @param hook the value of this TSHttpHookID
 
@@ -2375,7 +2394,8 @@ tsapi const char *TSHttpServerStateNameLookup(TSServerState state);
 tsapi const char *TSHttpHookNameLookup(TSHttpHookID hook);
 
 /**
-   Return a string respresentation for a TSEvent value. This is useful for plugin debugging.
+   Return a string respresentation for a TSEvent value. This is useful for
+   plugin debugging.
 
    @param event the value of this TSHttpHookID
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/hdrs/HTTP.cc
----------------------------------------------------------------------
diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc
index 25f14d1..3de7430 100644
--- a/proxy/hdrs/HTTP.cc
+++ b/proxy/hdrs/HTTP.cc
@@ -29,6 +29,7 @@
 #include "HTTP.h"
 #include "HdrToken.h"
 #include "Diags.h"
+#include "I_IOBuffer.h"
 
 /***********************************************************************
  *                                                                     *
@@ -1782,81 +1783,66 @@ ClassAllocator<HTTPCacheAlt> httpCacheAltAllocator("httpCacheAltAllocator");
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 HTTPCacheAlt::HTTPCacheAlt()
-  : m_magic(CACHE_ALT_MAGIC_ALIVE), m_writeable(1), m_unmarshal_len(-1), m_id(-1), m_rid(-1), m_request_hdr(), m_response_hdr(),
-    m_request_sent_time(0), m_response_received_time(0), m_frag_offset_count(0), m_frag_offsets(0), m_ext_buffer(NULL)
+  : m_magic(CACHE_ALT_MAGIC_ALIVE), m_unmarshal_len(-1), m_id(-1), m_rid(-1), m_frag_count(0), m_request_hdr(), m_response_hdr(),
+    m_request_sent_time(0), m_response_received_time(0), m_fragments(0), m_ext_buffer(NULL)
 {
-  m_object_key[0] = 0;
-  m_object_key[1] = 0;
-  m_object_key[2] = 0;
-  m_object_key[3] = 0;
-  m_object_size[0] = 0;
-  m_object_size[1] = 0;
+  m_flags = 0;               // set all flags to false.
+  m_flag.writeable_p = true; // except this one.
 }
 
 void
 HTTPCacheAlt::destroy()
 {
   ink_assert(m_magic == CACHE_ALT_MAGIC_ALIVE);
-  ink_assert(m_writeable);
+  ink_assert(m_flag.writeable_p);
   m_magic = CACHE_ALT_MAGIC_DEAD;
-  m_writeable = 0;
+  m_flag.writeable_p = 0;
   m_request_hdr.destroy();
   m_response_hdr.destroy();
-  m_frag_offset_count = 0;
-  if (m_frag_offsets && m_frag_offsets != m_integral_frag_offsets) {
-    ats_free(m_frag_offsets);
-    m_frag_offsets = 0;
-  }
+  m_frag_count = 0;
+  if (m_flag.table_allocated_p)
+    ats_free(m_fragments);
+  m_fragments = 0;
   httpCacheAltAllocator.free(this);
 }
 
 void
-HTTPCacheAlt::copy(HTTPCacheAlt *to_copy)
+HTTPCacheAlt::copy(HTTPCacheAlt *that)
 {
-  m_magic = to_copy->m_magic;
-  // m_writeable =      to_copy->m_writeable;
-  m_unmarshal_len = to_copy->m_unmarshal_len;
-  m_id = to_copy->m_id;
-  m_rid = to_copy->m_rid;
-  m_object_key[0] = to_copy->m_object_key[0];
-  m_object_key[1] = to_copy->m_object_key[1];
-  m_object_key[2] = to_copy->m_object_key[2];
-  m_object_key[3] = to_copy->m_object_key[3];
-  m_object_size[0] = to_copy->m_object_size[0];
-  m_object_size[1] = to_copy->m_object_size[1];
+  m_magic = that->m_magic;
+  m_unmarshal_len = that->m_unmarshal_len;
+  m_id = that->m_id;
+  m_rid = that->m_rid;
+  m_earliest = that->m_earliest;
 
-  if (to_copy->m_request_hdr.valid()) {
-    m_request_hdr.copy(&to_copy->m_request_hdr);
+  if (that->m_request_hdr.valid()) {
+    m_request_hdr.copy(&that->m_request_hdr);
   }
 
-  if (to_copy->m_response_hdr.valid()) {
-    m_response_hdr.copy(&to_copy->m_response_hdr);
+  if (that->m_response_hdr.valid()) {
+    m_response_hdr.copy(&that->m_response_hdr);
   }
 
-  m_request_sent_time = to_copy->m_request_sent_time;
-  m_response_received_time = to_copy->m_response_received_time;
-  this->copy_frag_offsets_from(to_copy);
-}
+  m_request_sent_time = that->m_request_sent_time;
+  m_response_received_time = that->m_response_received_time;
+  m_fixed_fragment_size = that->m_fixed_fragment_size;
 
-void
-HTTPCacheAlt::copy_frag_offsets_from(HTTPCacheAlt *src)
-{
-  m_frag_offset_count = src->m_frag_offset_count;
-  if (m_frag_offset_count > 0) {
-    if (m_frag_offset_count > N_INTEGRAL_FRAG_OFFSETS) {
-      /* Mixed feelings about this - technically we don't need it to be a
-         power of two when copied because currently that means it is frozen.
-         But that could change later and it would be a nasty bug to find.
-         So we'll do it for now. The relative overhead is tiny.
-      */
-      int bcount = HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS * 2;
-      while (bcount < m_frag_offset_count)
-        bcount *= 2;
-      m_frag_offsets = static_cast<FragOffset *>(ats_malloc(sizeof(FragOffset) * bcount));
-    } else {
-      m_frag_offsets = m_integral_frag_offsets;
-    }
-    memcpy(m_frag_offsets, src->m_frag_offsets, sizeof(FragOffset) * m_frag_offset_count);
+  m_frag_count = that->m_frag_count;
+
+  if (m_flag.table_allocated_p)
+    ats_free(m_fragments);
+
+  // Safe to copy now, and we need to do that before we copy the fragment table.
+  m_flags = that->m_flags;
+
+  if (that->m_fragments) {
+    size_t size = FragmentDescriptorTable::calc_size(that->m_fragments->m_n);
+    m_fragments = static_cast<FragmentDescriptorTable *>(ats_malloc(size));
+    memcpy(m_fragments, that->m_fragments, size);
+    m_flag.table_allocated_p = true;
+  } else {
+    m_fragments = 0;
+    m_flag.table_allocated_p = false;
   }
 }
 
@@ -1871,7 +1857,7 @@ HTTPInfo::create()
 void
 HTTPInfo::copy(HTTPInfo *hi)
 {
-  if (m_alt && m_alt->m_writeable) {
+  if (m_alt && m_alt->m_flag.writeable_p) {
     destroy();
   }
 
@@ -1879,14 +1865,6 @@ HTTPInfo::copy(HTTPInfo *hi)
   m_alt->copy(hi->m_alt);
 }
 
-void
-HTTPInfo::copy_frag_offsets_from(HTTPInfo *src)
-{
-  if (m_alt && src->m_alt)
-    m_alt->copy_frag_offsets_from(src->m_alt);
-}
-
-
 int
 HTTPInfo::marshal_length()
 {
@@ -1900,10 +1878,8 @@ HTTPInfo::marshal_length()
     len += m_alt->m_response_hdr.m_heap->marshal_length();
   }
 
-  if (m_alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
-    len -= sizeof(m_alt->m_integral_frag_offsets);
-    len += sizeof(FragOffset) * m_alt->m_frag_offset_count;
-  }
+  if (m_alt->m_fragments)
+    len += FragmentDescriptorTable::calc_size(m_alt->m_fragments->m_n);
 
   return len;
 }
@@ -1916,42 +1892,30 @@ HTTPInfo::marshal(char *buf, int len)
   HTTPCacheAlt *marshal_alt = (HTTPCacheAlt *)buf;
   // non-zero only if the offsets are external. Otherwise they get
   // marshalled along with the alt struct.
-  int frag_len = (0 == m_alt->m_frag_offset_count || m_alt->m_frag_offsets == m_alt->m_integral_frag_offsets) ?
-                   0 :
-                   sizeof(HTTPCacheAlt::FragOffset) * m_alt->m_frag_offset_count;
+  size_t frag_len = m_alt->m_fragments ? FragmentDescriptorTable::calc_size(m_alt->m_fragments->m_n) : 0;
 
   ink_assert(m_alt->m_magic == CACHE_ALT_MAGIC_ALIVE);
 
   // Make sure the buffer is aligned
   //    ink_assert(((intptr_t)buf) & 0x3 == 0);
 
-  // If we have external fragment offsets, copy the initial ones
-  // into the integral data.
-  if (frag_len) {
-    memcpy(m_alt->m_integral_frag_offsets, m_alt->m_frag_offsets, sizeof(m_alt->m_integral_frag_offsets));
-    frag_len -= sizeof(m_alt->m_integral_frag_offsets);
-    // frag_len should never be non-zero at this point, as the offsets
-    // should be external only if too big for the internal table.
-  }
   // Memcpy the whole object so that we can use it
   //   live later.  This involves copying a few
   //   extra bytes now but will save copying any
   //   bytes on the way out of the cache
   memcpy(buf, m_alt, sizeof(HTTPCacheAlt));
   marshal_alt->m_magic = CACHE_ALT_MAGIC_MARSHALED;
-  marshal_alt->m_writeable = 0;
+  marshal_alt->m_flag.writeable_p = 0;
   marshal_alt->m_unmarshal_len = -1;
   marshal_alt->m_ext_buffer = NULL;
   buf += HTTP_ALT_MARSHAL_SIZE;
   used += HTTP_ALT_MARSHAL_SIZE;
 
   if (frag_len > 0) {
-    marshal_alt->m_frag_offsets = static_cast<FragOffset *>(reinterpret_cast<void *>(used));
-    memcpy(buf, m_alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, frag_len);
+    marshal_alt->m_fragments = static_cast<FragmentDescriptorTable *>(reinterpret_cast<void *>(used));
+    memcpy(buf, m_alt->m_fragments, frag_len);
     buf += frag_len;
     used += frag_len;
-  } else {
-    marshal_alt->m_frag_offsets = 0;
   }
 
   // The m_{request,response}_hdr->m_heap pointers are converted
@@ -1993,7 +1957,6 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
 
   if (alt->m_magic == CACHE_ALT_MAGIC_ALIVE) {
     // Already unmarshaled, must be a ram cache
-    //  it
     ink_assert(alt->m_unmarshal_len > 0);
     ink_assert(alt->m_unmarshal_len <= len);
     return alt->m_unmarshal_len;
@@ -2004,31 +1967,14 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
 
   ink_assert(alt->m_unmarshal_len < 0);
   alt->m_magic = CACHE_ALT_MAGIC_ALIVE;
-  ink_assert(alt->m_writeable == 0);
+  ink_assert(alt->m_flag.writeable_p == 0);
   len -= HTTP_ALT_MARSHAL_SIZE;
 
-  if (alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
-    // stuff that didn't fit in the integral slots.
-    int extra = sizeof(FragOffset) * alt->m_frag_offset_count - sizeof(alt->m_integral_frag_offsets);
-    char *extra_src = buf + reinterpret_cast<intptr_t>(alt->m_frag_offsets);
-    // Actual buffer size, which must be a power of two.
-    // Well, technically not, because we never modify an unmarshalled fragment
-    // offset table, but it would be a nasty bug should that be done in the
-    // future.
-    int bcount = HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS * 2;
-
-    while (bcount < alt->m_frag_offset_count)
-      bcount *= 2;
-    alt->m_frag_offsets =
-      static_cast<FragOffset *>(ats_malloc(bcount * sizeof(FragOffset))); // WRONG - must round up to next power of 2.
-    memcpy(alt->m_frag_offsets, alt->m_integral_frag_offsets, sizeof(alt->m_integral_frag_offsets));
-    memcpy(alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, extra_src, extra);
-    len -= extra;
-  } else if (alt->m_frag_offset_count > 0) {
-    alt->m_frag_offsets = alt->m_integral_frag_offsets;
-  } else {
-    alt->m_frag_offsets = 0; // should really already be zero.
+  if (alt->m_fragments) {
+    alt->m_fragments = reinterpret_cast<FragmentDescriptorTable *>(buf + reinterpret_cast<intptr_t>(alt->m_fragments));
+    len -= FragmentDescriptorTable::calc_size(alt->m_fragments->m_n);
   }
+  alt->m_flag.table_allocated_p = false;
 
   HdrHeap *heap = (HdrHeap *)(alt->m_request_hdr.m_heap ? (buf + (intptr_t)alt->m_request_hdr.m_heap) : 0);
   HTTPHdrImpl *hh = NULL;
@@ -2044,6 +1990,7 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
     alt->m_request_hdr.m_http = hh;
     alt->m_request_hdr.m_mime = hh->m_fields_impl;
     alt->m_request_hdr.m_url_cached.m_heap = heap;
+    alt->m_request_hdr.mark_target_dirty();
   }
 
   heap = (HdrHeap *)(alt->m_response_hdr.m_heap ? (buf + (intptr_t)alt->m_response_hdr.m_heap) : 0);
@@ -2058,6 +2005,7 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
     alt->m_response_hdr.m_heap = heap;
     alt->m_response_hdr.m_http = hh;
     alt->m_response_hdr.m_mime = hh->m_fields_impl;
+    alt->m_response_hdr.mark_target_dirty();
   }
 
   alt->m_unmarshal_len = orig_len - len;
@@ -2078,7 +2026,7 @@ HTTPInfo::check_marshalled(char *buf, int len)
     return false;
   }
 
-  if (alt->m_writeable != false) {
+  if (alt->m_flag.writeable_p != false) {
     return false;
   }
 
@@ -2167,22 +2115,632 @@ HTTPInfo::get_handle(char *buf, int len)
   return -1;
 }
 
+/** Get the descriptor for fragment @a idx, growing the table if needed.
+
+    Index 0 is the earliest fragment, which is stored directly in the
+    alternate header. For larger indices the descriptor table is allocated
+    (sized from the object size when known) or grown by ~50%; existing
+    entries are copied over and the new tail entries are seeded with
+    chained keys and fixed-size offsets.
+
+    @return Pointer to the descriptor for fragment @a idx.
+*/
+HTTPInfo::FragmentDescriptor *
+HTTPInfo::force_frag_at(unsigned int idx)
+{
+  FragmentDescriptor *frag;
+  FragmentDescriptorTable *old_table = 0;
+
+  ink_assert(m_alt);
+  // @a idx is unsigned, so the former "idx >= 0" assert was vacuous and is gone.
+
+  if (0 == idx)
+    return &m_alt->m_earliest;
+
+  if (0 == m_alt->m_fragments || idx > m_alt->m_fragments->m_n) { // no room at the inn
+    int64_t obj_size = this->object_size_get();
+    uint32_t ff_size = this->get_frag_fixed_size();
+    unsigned int n = 0; // set if we need to allocate, this is max array index needed.
+
+    ink_assert(ff_size);
+
+    if (0 == m_alt->m_fragments && obj_size > 0) {
+      // First allocation with a known object size - size for the expected fragment count.
+      n = (obj_size + ff_size - 1) / ff_size;
+      if (idx > n)
+        n = idx;
+      if (!m_alt->m_earliest.m_flag.cached_p)
+        ++n; // going to have an empty earliest fragment.
+    } else {
+      n = idx + MAX(4, idx >> 1); // grow by 50% and at least 4
+      old_table = m_alt->m_fragments;
+    }
+
+    size_t size = FragmentDescriptorTable::calc_size(n);
+    size_t old_size = 0;
+    unsigned int old_count = 0;
+    int64_t offset = 0;
+    CryptoHash key;
+
+    m_alt->m_fragments = static_cast<FragmentDescriptorTable *>(ats_malloc(size));
+    ink_zero(*(m_alt->m_fragments)); // just need to zero the base struct.
+    if (old_table) {
+      // Continue the key/offset chain from the last entry of the old table.
+      old_count = old_table->m_n;
+      frag = &((*old_table)[old_count]);
+      offset = frag->m_offset;
+      key = frag->m_key;
+      old_size = FragmentDescriptorTable::calc_size(old_count);
+      memcpy(m_alt->m_fragments, old_table, old_size);
+      // The flag still describes @a old_table here - it is (re)set below for the new table.
+      if (m_alt->m_flag.table_allocated_p)
+        ats_free(old_table);
+    } else {
+      key = m_alt->m_earliest.m_key;
+      m_alt->m_fragments->m_cached_idx = 0;
+    }
+    m_alt->m_fragments->m_n = n;
+    m_alt->m_flag.table_allocated_p = true;
+    // fill out the new parts with offsets & keys.
+    ++old_count; // left as the index of the last frag in the previous set.
+    for (frag = &((*m_alt->m_fragments)[old_count]); old_count <= n; ++old_count, ++frag) {
+      key.next();
+      offset += ff_size;
+      frag->m_key = key;
+      frag->m_offset = offset;
+      frag->m_flags = 0;
+    }
+  }
+  ink_assert(idx > m_alt->m_fragments->m_cached_idx);
+  return &(*m_alt->m_fragments)[idx];
+}
+
 void
-HTTPInfo::push_frag_offset(FragOffset offset)
+HTTPInfo::mark_frag_write(unsigned int idx)
 {
   ink_assert(m_alt);
-  if (0 == m_alt->m_frag_offsets) {
-    m_alt->m_frag_offsets = m_alt->m_integral_frag_offsets;
-  } else if (m_alt->m_frag_offset_count >= HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS &&
-             0 == (m_alt->m_frag_offset_count & (m_alt->m_frag_offset_count - 1))) {
-    // need more space than in integral storage and we're at an upgrade
-    // size (power of 2).
-    FragOffset *nf = static_cast<FragOffset *>(ats_malloc(sizeof(FragOffset) * (m_alt->m_frag_offset_count * 2)));
-    memcpy(nf, m_alt->m_frag_offsets, sizeof(FragOffset) * m_alt->m_frag_offset_count);
-    if (m_alt->m_frag_offsets != m_alt->m_integral_frag_offsets)
-      ats_free(m_alt->m_frag_offsets);
-    m_alt->m_frag_offsets = nf;
+  ink_assert(idx >= 0);
+
+  if (idx >= m_alt->m_frag_count)
+    m_alt->m_frag_count = idx + 1;
+
+  if (0 == idx) {
+    m_alt->m_earliest.m_flag.cached_p = true;
+  } else {
+    this->force_frag_at(idx)->m_flag.cached_p = true;
+  }
+
+  // bump the last cached value if possible and mark complete if appropriate.
+  if (m_alt->m_fragments && idx == m_alt->m_fragments->m_cached_idx + 1) {
+    unsigned int j = idx + 1;
+    while (j < m_alt->m_frag_count && (*m_alt->m_fragments)[j].m_flag.cached_p)
+      ++j;
+    m_alt->m_fragments->m_cached_idx = j - 1;
+    if (!m_alt->m_flag.content_length_p &&
+        (this->get_frag_fixed_size() + this->get_frag_offset(j - 1)) > static_cast<int64_t>(m_alt->m_earliest.m_offset))
+      m_alt->m_flag.complete_p = true;
+  }
+}
+
+/** Find the index of the fragment containing content @a offset.
+
+    Without a descriptor table the layout is uniform (fixed-size fragments)
+    so this is a plain division. With a table, old cache data may have
+    irregular offsets, so an initial estimate is refined by a short linear
+    search (expected 1-2 steps).
+
+    @note Divides by get_frag_fixed_size(); presumes it is non-zero on this
+    path (it is asserted in force_frag_at) - TODO confirm for all callers.
+*/
+int
+HTTPInfo::get_frag_index_of(int64_t offset)
+{
+  int zret = 0;
+  uint32_t ff_size = this->get_frag_fixed_size();
+  FragmentDescriptorTable *table = this->get_frag_table();
+  if (!table) {
+    // Never the case that we have an empty earliest fragment *and* no frag table.
+    zret = offset / ff_size;
+  } else {
+    FragmentDescriptorTable &frags = *table; // easier to work with.
+    int n = frags.m_n;                       // also the max valid frag table index and always >= 1.
+    // I should probably make @a m_offset int64_t to avoid casting issues like this...
+    uint64_t uoffset = static_cast<uint64_t>(offset);
+
+    if (uoffset >= frags[n].m_offset) {
+      // in or past the last fragment, compute the index by computing the # of @a ff_size chunks past the end.
+      zret = n + (static_cast<uint64_t>(offset) - frags[n].m_offset) / ff_size;
+    } else if (uoffset < frags[1].m_offset) {
+      zret = 0; // in the earliest fragment.
+    } else {
+      // Need to handle old data where the offsets are not guaranteed to be regular.
+      // So we start with our guess (which should be close) and if we're right, boom, else linear
+      // search which should only be 1 or 2 steps.
+      zret = offset / ff_size;
+      if (frags[1].m_offset == 0 || 0 == zret) // zret can be zero if the earliest frag is less than @a ff_size
+        ++zret;
+      while (0 < zret && zret < n) {
+        if (uoffset < frags[zret].m_offset) {
+          --zret;
+        } else if (uoffset >= frags[zret + 1].m_offset) {
+          ++zret;
+        } else {
+          break;
+        }
+      }
+    }
+  }
+  return zret;
+}
+/***********************************************************************
+ *                                                                     *
+ *                      R A N G E   S U P P O R T                      *
+ *                                                                     *
+ ***********************************************************************/
+
+namespace
+{
+// Need to promote this out of here at some point.
+/// Bounded parse of a non-negative 64 bit integer from a fixed-length buffer.
+/// @a result is updated whenever a conversion is attempted, even on failure.
+struct integer {
+  static size_t const MAX_DIGITS = 15; ///< Digit limit, avoids 64 bit overflow.
+  static bool
+  parse(ts::ConstBuffer const &b, uint64_t &result)
+  {
+    size_t consumed;
+    if (b.size() < 1 || b.size() > MAX_DIGITS)
+      return false; // empty, or too long to convert safely.
+    result = ats_strto64(b.data(), b.size(), &consumed);
+    return consumed == b.size(); // valid only if every character converted.
+  }
+};
+}
+
+/** Parse a Range header field @a value (e.g. "bytes=0-99,200-").
+
+    Ranges are accumulated in raw form - suffix ranges are not resolved until
+    apply(). Any syntax error forces the state to INVALID even when earlier
+    range elements parsed cleanly.
+
+    @return @c true if @a value was a fully valid range specifier.
+*/
+bool
+HTTPRangeSpec::parseRangeFieldValue(char const *v, int len)
+{
+  // Maximum # of digits permitted for an offset. Avoid issues with overflow.
+  static size_t const MAX_DIGITS = 15;
+  ts::ConstBuffer src(v, len);
+  size_t n;
+  bool error_p = false; // any parse error - partial success must not report as valid.
+
+  _state = INVALID;
+  src.skip(&ParseRules::is_ws);
+
+  // Compare against the value of HTTP_LEN_BYTES (length of "bytes"),
+  // not sizeof() of the int variable that holds it.
+  if (src.size() > static_cast<size_t>(HTTP_LEN_BYTES) + 1 &&
+      0 == strncasecmp(src.data(), HTTP_VALUE_BYTES, HTTP_LEN_BYTES) && '=' == src[HTTP_LEN_BYTES]) {
+    src += HTTP_LEN_BYTES + 1;
+    while (src) {
+      ts::ConstBuffer max = src.splitOn(',');
+
+      if (!max) { // no comma so everything in @a src should be processed as a single range.
+        max = src;
+        src.reset();
+      }
+
+      ts::ConstBuffer min = max.splitOn('-');
+
+      src.skip(&ParseRules::is_ws);
+      // Spec forbids whitespace anywhere in the range element.
+
+      if (min) {
+        if (ParseRules::is_digit(*min) && min.size() <= MAX_DIGITS) {
+          uint64_t low = ats_strto64(min.data(), min.size(), &n);
+          if (n < min.size()) {
+            error_p = true; // extra cruft in range, not even ws allowed
+            break;
+          }
+          if (max) {
+            if (ParseRules::is_digit(*max) && max.size() <= MAX_DIGITS) {
+              uint64_t high = ats_strto64(max.data(), max.size(), &n);
+              if (n < max.size() && (max += n).skip(&ParseRules::is_ws)) {
+                error_p = true; // non-ws cruft after maximum
+                break;
+              } else {
+                this->add(low, high);
+              }
+            } else {
+              error_p = true; // invalid characters for maximum
+              break;
+            }
+          } else {
+            this->add(low, UINT64_MAX); // "X-" : "offset X to end of content"
+          }
+        } else {
+          error_p = true; // invalid characters for minimum
+          break;
+        }
+      } else {
+        if (max) {
+          if (ParseRules::is_digit(*max) && max.size() <= MAX_DIGITS) {
+            uint64_t high = ats_strto64(max.data(), max.size(), &n);
+            if (n < max.size() && (max += n).skip(&ParseRules::is_ws)) {
+              error_p = true; // cruft after end of maximum
+              break;
+            } else {
+              this->add(high, 0); // "-X" : suffix range, the last X bytes.
+            }
+          } else {
+            error_p = true; // invalid maximum
+            break;
+          }
+        }
+      }
+    }
+    // An error invalidates the whole field even if @a src was fully consumed
+    // (previously an early break with drained @a src accepted malformed input).
+    if (error_p || src)
+      _state = INVALID;
+  }
+  return _state != INVALID;
+}
+
+HTTPRangeSpec &
+HTTPRangeSpec::add(Range const &r)
+{
+  // State machine: first range lives in @a _single; the second promotes the
+  // spec to MULTI by copying @a _single into the vector first.
+  switch (_state) {
+  case MULTI:
+    _ranges.push_back(r);
+    break;
+  case SINGLE:
+    _ranges.push_back(_single);
+    _ranges.push_back(r);
+    _state = MULTI;
+    break;
+  default: // EMPTY / INVALID / UNSATISFIABLE - becomes a single range spec.
+    _single = r;
+    _state = SINGLE;
+    break;
+  }
+  return *this;
+}
+
+/** Resolve the range spec against actual content length @a len.
+
+    Suffix ranges become absolute, unsatisfiable ranges are dropped, ranges
+    are clipped to @a len, and the state is collapsed accordingly
+    (EMPTY / SINGLE / MULTI / UNSATISFIABLE).
+
+    @return @c true if the spec remains valid (possibly empty).
+*/
+bool
+HTTPRangeSpec::apply(uint64_t len)
+{
+  if (!this->hasRanges()) {
+    // nothing - simplifying later logic.
+  } else if (0 == len) {
+    /* Must special case zero length content
+       - suffix ranges are OK but other ranges are not.
+       - Best option is to return a 200 (not 206 or 416) for all suffix range spec on zero length content.
+         (this is what Apache HTTPD does)
+       - So, mark result as either @c UNSATISFIABLE or @c EMPTY, clear all ranges.
+    */
+    _state = EMPTY;
+    if (!_single.isSuffix())
+      _state = UNSATISFIABLE;
+    for (RangeBox::iterator spot = _ranges.begin(), limit = _ranges.end(); spot != limit && EMPTY == _state; ++spot) {
+      if (!spot->isSuffix())
+        _state = UNSATISFIABLE;
+    }
+    _ranges.clear();
+  } else if (this->isSingle()) {
+    if (!_single.apply(len))
+      _state = UNSATISFIABLE;
+  } else { // gotta be MULTI
+    // Compact valid ranges in place: @a src scans, @a dst receives survivors.
+    int src = 0, dst = 0;
+    int n = _ranges.size();
+    while (src < n) {
+      Range &r = _ranges[src];
+      if (r.apply(len)) {
+        if (src != dst)
+          _ranges[dst] = r;
+        ++dst;
+      }
+      ++src;
+    }
+    // at this point, @a dst is the # of valid ranges.
+    if (dst > 0) {
+      // Keep @a _single mirroring the first range (class invariant).
+      _single = _ranges[0];
+      if (dst == 1)
+        _state = SINGLE;
+      _ranges.resize(dst);
+    } else {
+      _state = UNSATISFIABLE;
+      _ranges.clear();
+    }
+  }
+  return this->isValid();
+}
+
+// Protocol tokens for parsing "multipart/byteranges" Content-Type values.
+static ts::ConstBuffer const MULTIPART_BYTERANGE("multipart/byteranges", 20);
+// "boundary" is 8 characters - the length must not include the NUL terminator,
+// otherwise skipNoCase() can never match the parameter name in field text.
+static ts::ConstBuffer const MULTIPART_BOUNDARY("boundary", 8);
+
+/** Parse a Content-Range field @a value into @a r (and @a boundary for multipart).
+
+    Handles "bytes X-Y/Z", "bytes * /Z" (unsatisfied), "bytes X-Y/*"
+    (indeterminate length), and "multipart/byteranges; boundary=..." values.
+
+    @return The content length, or -1 on parse error or indeterminate length.
+    The return is ambiguous on its own - check the state of @a r (see header docs).
+*/
+int64_t
+HTTPRangeSpec::parseContentRangeFieldValue(char const *v, int len, Range &r, ts::ConstBuffer &boundary)
+{
+  // [amc] TBD - handle the multipart/byteranges syntax.
+  ts::ConstBuffer src(v, len);
+  int64_t zret = -1;
+
+  r.invalidate();
+  src.skip(&ParseRules::is_ws);
+
+  if (src.skipNoCase(MULTIPART_BYTERANGE)) {
+    while (src && (';' == *src || ParseRules::is_ws(*src)))
+      ++src;
+    if (src.skipNoCase(MULTIPART_BOUNDARY)) {
+      src.trim(&ParseRules::is_ws);
+      boundary = src;
+    }
+  } else if (src.size() > static_cast<size_t>(HTTP_LEN_BYTES) + 1 && // value of HTTP_LEN_BYTES, not sizeof(int)
+             0 == strncasecmp(src.data(), HTTP_VALUE_BYTES, HTTP_LEN_BYTES) &&
+             ParseRules::is_ws(src[HTTP_LEN_BYTES]) // must have white space
+             ) {
+    uint64_t cl, low, high;
+    bool unsatisfied_p = false, indeterminate_p = false;
+    ts::ConstBuffer min, max;
+
+    src += HTTP_LEN_BYTES;
+    src.skip(&ParseRules::is_ws); // but can have any number
+
+    max = src.splitOn('/'); // src has total length value
+
+    if (max.size() == 1 && *max == '*')
+      unsatisfied_p = true;
+    else
+      min = max.splitOn('-');
+
+    src.trim(&ParseRules::is_ws);
+    if (src && src.size() == 1 && *src == '*')
+      indeterminate_p = true;
+
+    // note - spec forbids internal spaces so it's "X-Y/Z" w/o whitespace.
+    // spec also says we can have "*/Z" or "X-Y/*" but never "*/*".
+
+    if (!(indeterminate_p && unsatisfied_p) && (indeterminate_p || integer::parse(src, cl)) &&
+        (unsatisfied_p || (integer::parse(min, low) && integer::parse(max, high)))) {
+      if (!unsatisfied_p)
+        r._min = low, r._max = high;
+      if (!indeterminate_p)
+        zret = static_cast<int64_t>(cl);
+    }
+  }
+  return zret;
+}
+
+namespace
+{
+/// Number of characters needed to render @a x in decimal
+/// (identical to the length snprintf("%" PRIu64) would produce).
+int
+Calc_Digital_Length(uint64_t x)
+{
+  int digits = 1;
+  while (x >= 10) {
+    x /= 10;
+    ++digits;
+  }
+  return digits;
+}
+}
+
+/** Size in bytes of one multipart part boundary block.
+
+    @note Presumes the boundary marker is exactly HTTP_RANGE_BOUNDARY_LEN
+    characters and that the X/Y/Z numeric fields are space padded to the
+    digit width of @a object_size (writePartBoundary space-fills to match) -
+    TODO confirm all callers pass a boundary of that length.
+*/
+uint64_t
+HTTPRangeSpec::calcPartBoundarySize(uint64_t object_size, uint64_t ct_val_len)
+{
+  size_t l_size = Calc_Digital_Length(object_size);
+  // CR LF "--" boundary-string CR LF "Content-Range" ": " "bytes " X "-" Y "/" Z CR LF Content-Type CR LF
+  uint64_t zret =
+    4 + HTTP_RANGE_BOUNDARY_LEN + 2 + MIME_LEN_CONTENT_RANGE + 2 + HTTP_LEN_BYTES + 1 + l_size + 1 + l_size + 1 + l_size + 2;
+  if (ct_val_len)
+    zret += MIME_LEN_CONTENT_TYPE + 2 + ct_val_len + 2;
+  return zret;
+}
+
+/** Compute the response Content-Length for this spec on content of
+    @a object_size bytes.
+
+    With no ranges this is just @a object_size; a single range is its own
+    size; a multi-range response adds the part boundary overhead per range
+    plus the trailing "--" of the final boundary. @a ct_val_len is the
+    Content-Type value length repeated in each part header.
+
+    @note Only meaningful after apply() has made the ranges absolute.
+*/
+uint64_t
+HTTPRangeSpec::calcContentLength(uint64_t object_size, uint64_t ct_val_len) const
+{
+  uint64_t size = object_size;
+  size_t nr = this->count();
+
+  if (nr >= 1) {
+    size = this->size();                                                    // the real content size.
+    if (nr > 1)                                                             // part boundaries
+      size += nr * self::calcPartBoundarySize(object_size, ct_val_len) + 2; // need trailing '--'
+  }
+  return size;
+}
+
+/** Write one multipart part boundary (with its part headers) to @a out.
+
+    The text is built in a freshly allocated IO buffer block sized by
+    calcPartBoundarySize() and appended to @a out. The numeric range fields
+    are space padded so the written size exactly matches the precomputed
+    size (checked by the final assert).
+
+    @note Presumes @a boundary_len == HTTP_RANGE_BOUNDARY_LEN, since the
+    size precomputation uses that constant - TODO confirm callers.
+    @return Number of bytes written.
+*/
+uint64_t
+HTTPRangeSpec::writePartBoundary(MIOBuffer *out, char const *boundary_str, size_t boundary_len, uint64_t total_size, uint64_t low,
+                                 uint64_t high, MIMEField *ctf, bool final)
+{
+  size_t x;                                                  // tmp for printf results.
+  size_t loc_size = Calc_Digital_Length(total_size) * 3 + 3; // precomputed size of all the location / size text.
+  size_t n = self::calcPartBoundarySize(total_size, ctf ? ctf->m_len_value : 0) + (final ? 2 : 0);
+  Ptr<IOBufferData> d(new_IOBufferData(iobuffer_size_to_index(n, MAX_BUFFER_SIZE_INDEX), MEMALIGNED));
+  char *spot = d->data();
+
+  x = snprintf(spot, n, "\r\n--%.*s", static_cast<int>(boundary_len), boundary_str);
+  spot += x;
+  n -= x;
+  if (final) {
+    // Closing boundary gets the extra "--" suffix.
+    memcpy(spot, "--", 2);
+    spot += 2;
+    n -= 2;
+  }
+
+  x = snprintf(spot, n, "\r\n%.*s: %.*s", MIME_LEN_CONTENT_RANGE, MIME_FIELD_CONTENT_RANGE, HTTP_LEN_BYTES, HTTP_VALUE_BYTES);
+  spot += x;
+  n -= x;
+  spot[-HTTP_LEN_BYTES] = tolower(spot[-HTTP_LEN_BYTES]); // ugly cleanup just to be careful of stupid user agents.
+
+  x = snprintf(spot, n, " %" PRIu64 "-%" PRIu64 "/%" PRIu64, low, high, total_size);
+  // Need to space fill to match pre-computed size
+  if (x < loc_size)
+    memset(spot + x, ' ', loc_size - x);
+  spot += loc_size;
+  n -= loc_size;
+
+  if (ctf) {
+    int ctf_len;
+    char const *ctf_val = ctf->value_get(&ctf_len);
+    if (ctf_val) {
+      x = snprintf(spot, n, "\r\n%.*s: %.*s", MIME_LEN_CONTENT_TYPE, MIME_FIELD_CONTENT_TYPE, ctf_len, ctf_val);
+      spot += x;
+      n -= x;
+    }
+  }
+
+  // This also takes care of the snprintf null termination problem.
+  *spot++ = '\r';
+  *spot++ = '\n';
+  n -= 2;
+
+  ink_assert(n == 0);
+
+  IOBufferBlock *b = new_IOBufferBlock(d, spot - d->data());
+  b->_buf_end = b->_end;
+  out->append_block(b);
+
+  return spot - d->data();
+}
+
+/** Print @a count ranges from @a rv into @a buff as a Range field value
+    ("bytes=X-Y,X-Y,...").
+
+    Stops early (without error) when the buffer runs out of room.
+    NOTE(review): if a range does not fit after its separating comma was
+    already written, the output ends with a dangling comma - verify callers
+    tolerate truncated output.
+
+    @return Number of characters written.
+*/
+int
+HTTPRangeSpec::print_array(char *buff, size_t len, Range const *rv, int count)
+{
+  size_t zret = 0;
+  bool first = true;
+
+  // Can't possibly write a range in less than this size buffer.
+  if (len < static_cast<size_t>(HTTP_LEN_BYTES) + 4)
+    return 0;
+
+  for (int i = 0; i < count; ++i) {
+    int n;
+
+    if (first) {
+      // Leading "bytes=" - safe, guaranteed by the size check above.
+      memcpy(buff, HTTP_VALUE_BYTES, HTTP_LEN_BYTES);
+      buff[HTTP_LEN_BYTES] = '=';
+      zret += HTTP_LEN_BYTES + 1;
+      first = false;
+    } else if (len < zret + 4) {
+      break;
+    } else {
+      buff[zret++] = ',';
+    }
+
+    n = snprintf(buff + zret, len - zret, "%" PRIu64 "-%" PRIu64, rv[i]._min, rv[i]._max);
+    if (n + zret >= len)
+      break; // ran out of room
+    else
+      zret += n;
+  }
+  return zret;
+}
+
+int
+HTTPRangeSpec::print(char *buff, size_t len) const
+{
+  // Nothing to render for an empty/invalid spec; otherwise delegate to the
+  // array printer over the live range storage.
+  if (!this->hasRanges())
+    return 0;
+  return print_array(buff, len, &(*(this->begin())), this->count());
+}
+
+/** Print the ranges quantized.
+
+    Each range's edges are expanded to multiples of @a quantum, then ranges
+    that overlap or whose gap is smaller than @a interstitial are merged
+    before printing.
+
+    @note Rewritten: the previous accumulation logic was corrupt - the
+    insertion memmove shifted the array the wrong direction (clobbering
+    qr[0] and reading past the end), the merge scan never terminated early
+    so its compaction step removed the merged element itself, and
+    "sizeof(*qr) * qrn - j" subtracted @a j bytes instead of elements.
+
+    @return The number of characters printed.
+*/
+int
+HTTPRangeSpec::print_quantized(char *buff, size_t len, int64_t quantum, int64_t interstitial) const
+{
+  static const int MAX_R = 20; // this needs to be promoted
+  // We will want to have a max # of ranges limit, probably a build time constant, in the not so distant
+  // future anyway, so might as well start here.
+  int qrn = 0;     // count of quantized ranges
+  Range qr[MAX_R]; // quantized ranges
+
+  // Can't possibly write a range in less than this size buffer.
+  if (len < static_cast<size_t>(HTTP_LEN_BYTES) + 4)
+    return 0;
+
+  // Avoid annoying "+1" in the adjacency checks.
+  if (interstitial < 1)
+    interstitial = 1;
+  else
+    ++interstitial;
+
+  for (const_iterator spot = this->begin(), limit = this->end(); spot != limit; ++spot) {
+    Range r(*spot);
+    if (quantum > 1) { // expand edges to quantum multiples.
+      r._min = (r._min / quantum) * quantum;
+      r._max = ((r._max + quantum - 1) / quantum) * quantum - 1;
+    }
+    // Find the first accumulated range that is not entirely left of @a r.
+    int i = 0;
+    while (i < qrn && qr[i]._max + interstitial < r._min)
+      ++i;
+    if (i >= qrn) { // @a r is past everything - append.
+      ink_assert(qrn < MAX_R);
+      qr[qrn++] = r;
+    } else if (r._max + interstitial < qr[i]._min) { // strictly before qr[i] - insert.
+      ink_assert(qrn < MAX_R);
+      memmove(qr + i + 1, qr + i, sizeof(*qr) * (qrn - i));
+      qr[i] = r;
+      ++qrn;
+    } else { // overlaps / adjoins qr[i] and possibly its successors - merge.
+      int j = i + 1;
+      qr[i]._min = std::min(qr[i]._min, r._min);
+      qr[i]._max = std::max(qr[i]._max, r._max);
+      while (j < qrn && qr[j]._min <= qr[i]._max + interstitial) {
+        qr[i]._max = std::max(qr[i]._max, qr[j]._max);
+        ++j;
+      }
+      if (j < qrn) // compact out the absorbed ranges, keeping the merged one.
+        memmove(qr + i + 1, qr + j, sizeof(*qr) * (qrn - j));
+      qrn -= j - (i + 1);
+    }
+  }
+
+  return this->print_array(buff, len, qr, qrn);
+}
+
+/** Compute the content byte range spanned by fragments @a low .. @a high.
+
+    Fragment 0 (earliest) starts at content offset 0; the range for the last
+    fragment extends to the end of the object.
+
+    @note Presumes m_fragments is non-NULL whenever an index > 0 is used -
+    TODO confirm all callers guarantee that.
+*/
+HTTPRangeSpec::Range
+HTTPInfo::get_range_for_frags(int low, int high)
+{
+  HTTPRangeSpec::Range zret;
+  zret._min = low < 1 ? 0 : (*m_alt->m_fragments)[low].m_offset;
+  zret._max =
+    (high >= static_cast<int>(m_alt->m_frag_count) - 1 ? this->object_size_get() : (*m_alt->m_fragments)[high + 1].m_offset) - 1;
+  return zret;
+}
+
+/* Note - we're not handling unspecified content length and trailing segments at all here.
+   Must deal with that at some point.
+*/
+
+/** Compute the smallest single range covering all uncached data needed for @a req.
+
+    If @a req has ranges the hull is limited to them; otherwise every
+    uncached fragment is considered. If the result would start before
+    @a initial and the earliest fragment is not cached, the range is pulled
+    back to offset 0 (so the initial chunk gets fetched too).
+
+    @return An invalid range if nothing relevant is uncached (or no alternate).
+*/
+HTTPRangeSpec::Range
+HTTPInfo::get_uncached_hull(HTTPRangeSpec const &req, int64_t initial)
+{
+  HTTPRangeSpec::Range r;
+
+  if (m_alt && !m_alt->m_flag.complete_p) {
+    HTTPRangeSpec::Range s = req.getConvexHull();
+    if (m_alt->m_fragments) {
+      FragmentDescriptorTable &fdt = *(m_alt->m_fragments);
+      int32_t lidx;
+      int32_t ridx;
+      if (s.isValid()) {
+        lidx = this->get_frag_index_of(s._min);
+        ridx = this->get_frag_index_of(s._max);
+      } else { // not a range request, get hull of all uncached fragments
+        lidx = fdt.m_cached_idx + 1;
+        // This really isn't valid if !content_length_p, need to deal with that at some point.
+        ridx = this->get_frag_index_of(this->object_size_get());
+      }
+
+      if (lidx < 2 && !m_alt->m_earliest.m_flag.cached_p)
+        lidx = 0;
+      else {
+        if (0 == lidx)
+          ++lidx; // because if we get here with lidx == 0, earliest is cached and we should skip ahead.
+        while (lidx <= ridx && fdt[lidx].m_flag.cached_p)
+          ++lidx;
+      }
+
+      // Shrink from the right past any already-cached fragments.
+      while (lidx <= ridx && fdt[ridx].m_flag.cached_p)
+        --ridx;
+
+      if (lidx <= ridx)
+        r = this->get_range_for_frags(lidx, ridx);
+    } else { // no fragments past earliest cached yet
+      r._min = m_alt->m_earliest.m_flag.cached_p ? this->get_frag_fixed_size() : 0;
+      if (s.isValid()) {
+        r._min = std::max(r._min, s._min);
+        r._max = s._max;
+      } else {
+        r._max = INT64_MAX;
+      }
+    }
+    // Clip to the known object size, if any.
+    if (r.isValid() && m_alt->m_flag.content_length_p && static_cast<int64_t>(r._max) > this->object_size_get())
+      r._max = this->object_size_get();
+    if (static_cast<int64_t>(r._min) < initial && !m_alt->m_earliest.m_flag.cached_p)
+      r._min = 0;
   }
+  return r;
+}
 
-  m_alt->m_frag_offsets[m_alt->m_frag_offset_count++] = offset;
+#if 0
+bool
+HTTPInfo::get_uncached(HTTPRangeSpec const& req, HTTPRangeSpec& result)
+{
+  bool zret = false;
+  if (m_alt && !m_alt->m_flag.complete_p) {
+    FragmentAccessor frags(m_alt);
+
+    for ( HTTPRangeSpec::const_iterator spot = req.begin(), limit = req.end() ; spot != limit ; ++spot ) {
+      int32_t lidx = this->get_frag_index_of(spot->_min);
+      int32_t ridx = this->get_frag_index_of(spot->_max);
+      while (lidx <= ridx && frags[lidx].m_flag.cached_p)
+        ++lidx;
+      if (lidx > ridx) continue; // All of this range is present.
+      while (lidx <= ridx && frags[ridx].m_flag.cached_p) // must hit missing frag at lhs at the latest
+        --ridx;
+
+      if (lidx <= ridx) {
+        result.add(this->get_range_for_frags(lidx, ridx));
+        zret = true;
+      }
+    }
+  }
+  return zret;
 }
+#endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/hdrs/HTTP.h
----------------------------------------------------------------------
diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h
index 4c80bcc..cb6b4fd 100644
--- a/proxy/hdrs/HTTP.h
+++ b/proxy/hdrs/HTTP.h
@@ -1,32 +1,33 @@
 /** @file
 
-  A brief file description
+    A brief file description
 
-  @section license License
+    @section license License
 
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
 
-      http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
- */
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
 
 #ifndef __HTTP_H__
 #define __HTTP_H__
 
 #include <assert.h>
+#include <vector>
 #include "Arena.h"
-#include "INK_MD5.h"
+#include "CryptoHash.h"
 #include "MIME.h"
 #include "URL.h"
 
@@ -37,6 +38,11 @@
 #define HTTP_MAJOR(v) (((v) >> 16) & 0xFFFF)
 
 class Http2HeaderTable;
+class MIOBuffer;
+namespace ts
+{
+struct ConstBuffer;
+}
 
 enum HTTPStatus {
   HTTP_STATUS_NONE = 0,
@@ -416,6 +422,8 @@ extern int HTTP_LEN_S_MAXAGE;
 extern int HTTP_LEN_NEED_REVALIDATE_ONCE;
 extern int HTTP_LEN_100_CONTINUE;
 
+static size_t const HTTP_RANGE_BOUNDARY_LEN = 32 + 2 + 16;
+
 /* Private */
 void http_hdr_adjust(HTTPHdrImpl *hdrp, int32_t offset, int32_t length, int32_t delta);
 
@@ -463,13 +471,13 @@ int32_t http_parse_version(const char *start, const char *end);
 
 
 /*
-HTTPValAccept*         http_parse_accept (const char *buf, Arena *arena);
-HTTPValAcceptCharset*  http_parse_accept_charset (const char *buf, Arena *arena);
-HTTPValAcceptEncoding* http_parse_accept_encoding (const char *buf, Arena *arena);
-HTTPValAcceptLanguage* http_parse_accept_language (const char *buf, Arena *arena);
-HTTPValCacheControl*   http_parse_cache_control (const char *buf, Arena *arena);
-const char*            http_parse_cache_directive (const char **buf);
-HTTPValRange*          http_parse_range (const char *buf, Arena *arena);
+  HTTPValAccept*         http_parse_accept (const char *buf, Arena *arena);
+  HTTPValAcceptCharset*  http_parse_accept_charset (const char *buf, Arena *arena);
+  HTTPValAcceptEncoding* http_parse_accept_encoding (const char *buf, Arena *arena);
+  HTTPValAcceptLanguage* http_parse_accept_language (const char *buf, Arena *arena);
+  HTTPValCacheControl*   http_parse_cache_control (const char *buf, Arena *arena);
+  const char*            http_parse_cache_directive (const char **buf);
+  HTTPValRange*          http_parse_range (const char *buf, Arena *arena);
 */
 HTTPValTE *http_parse_te(const char *buf, int len, Arena *arena);
 
@@ -496,6 +504,235 @@ public:
   int32_t m_version;
 };
 
+/** A set of content ranges.
+
+    This represents the data for an HTTP range specification.
+    On a request this contains the request ranges. On a response it is the actual ranges in the
+    response, which are the requested ranges modified by the actual content length.
+*/
+struct HTTPRangeSpec {
+  typedef HTTPRangeSpec self;
+
+  /** A range of bytes in an object.
+
+      If @a _min > 0 and @a _max == 0 the range is backwards and counts from the
+      end of the object. That is (100,0) means the last 100 bytes of content.
+  */
+  struct Range {
+    uint64_t _min;
+    uint64_t _max;
+
+    /// Default constructor - invalid range.
+    Range() : _min(UINT64_MAX), _max(1) {}
+    /// Construct as the range ( @a low .. @a high )
+    Range(uint64_t low, uint64_t high) : _min(low), _max(high) {}
+
+    /// Test if this range is a suffix range.
+    bool isSuffix() const;
+    /// Test if this range is a valid range.
+    bool isValid() const;
+    /// Get the size (in bytes) of the range.
+    uint64_t size() const;
+    /** Convert range to absolute values for a content length of @a len.
+
+        @return @c true if the range was valid for @a len, @c false otherwise.
+    */
+    bool apply(uint64_t len);
+
+    /// Force the range to an empty state.
+    Range &invalidate();
+  };
+
+  /// Range iteration type.
+  typedef Range *iterator;
+  typedef Range const *const_iterator;
+
+  /// Current state of the overall specification.
+  /// @internal We can distinguish between @c SINGLE and @c MULTI by looking at the
+  /// size of @a _ranges but we need this to mark @c EMPTY vs. not.
+  enum State {
+    EMPTY,         ///< No range.
+    INVALID,       ///< Range parsing failed.
+    UNSATISFIABLE, ///< Content length application failed.
+    SINGLE,        ///< Single range.
+    MULTI,         ///< Multiple ranges.
+  } _state;
+
+  /// The first range value.
+  /// By separating this out we can avoid allocation in the case of a single
+  /// range value, which is by far the most common ( > 99% in my experience).
+  Range _single;
+  /// Storage for range values.
+  typedef std::vector<Range> RangeBox;
+  /// The first range is copied here if there is more than one (to simplify).
+  RangeBox _ranges;
+
+  /// Default constructor - empty range
+  HTTPRangeSpec();
+
+  /// Reset to re-usable state.
+  void clear();
+
+  /** Parse a Range field @a value and update @a this with the results.
+      @return @c true if @a value was a valid range specifier, @c false otherwise.
+  */
+  bool parseRangeFieldValue(char const *value, int len);
+
+  /** Parse a Content-Range field @a value.
+
+      @a r is set to the content range. If the content range is unsatisfied or a parse error the @a range is
+      set to be invalid.
+
+      @note The content length return is ambiguous on its own, the state of @a r must be checked.
+
+      - Multipart: @a boundary is not empty
+      - Parse error: @a CL == -1 and @a r is invalid
+      - Unsatisfiable: @a CL >= 0 and @a r is invalid
+      - Indeterminate: @c CL == -1 and @a r is valid
+
+      @return The content length, or -1 if there is an error or the content length is indeterminate.
+  */
+  static int64_t parseContentRangeFieldValue(char const *value, int len, Range &r, ts::ConstBuffer &boundary);
+
+  /// Print the range specification.
+  /// @return The number of characters printed.
+  int print(char *buff ///< Output buffer.
+            ,
+            size_t len ///< Size of output buffer.
+            ) const;
+
+  /// Print the range specification quantized.
+  /// @return The number of characters printed.
+  int print_quantized(char *buff ///< Output buffer.
+                      ,
+                      size_t len ///< Size of output buffer.
+                      ,
+                      int64_t quantum ///< Align ranges to multiples of this value.
+                      ,
+                      int64_t interstitial ///< Require gaps to be at least this large.
+                      ) const;
+
+  /// Print the @a ranges.
+  /// @return The number of characters printed.
+  static int print_array(char *buff ///< Output buffer.
+                         ,
+                         size_t len ///< Size of output buffer.
+                         ,
+                         Range const *ranges ///< Array of ranges
+                         ,
+                         int count ///< # of ranges
+                         );
+
+#if 0
+  /** Copy ranges from @a while applying them to the content @a length.
+
+      Ranges are copied if valid for @a length and converted to absolute offsets. The number of ranges
+      after application may be less than the @a src number of ranges. In addition ranges will be clipped
+      to @a length. 
+
+      @return @c true if the range spec is satisfiable, @c false otherwise.
+      Note a range spec with no ranges is always satisfiable and that suffix ranges are also
+      always satisfiable.
+  */
+  bool apply(self const& that, uint64_t length);
+#endif
+
+  /** Update ranges to be absolute based on content @a length.
+
+      Invalid ranges are removed, ranges will be clipped as needed, and suffix ranges will be
+      converted to absolute ranges.
+
+      @return @c true if the range spec is satisfiable (there remains at least one valid range), @c false otherwise.
+      Note a range spec with no ranges is always satisfiable and that suffix ranges are also
+      always satisfiable.
+  */
+  bool apply(uint64_t length);
+
+  /** Number of distinct ranges.
+      @return Number of ranges.
+  */
+  size_t count() const;
+
+  /// Get the size (in bytes) of the ranges.
+  uint64_t size() const;
+
+  /// If this is a valid  single range specification.
+  bool isSingle() const;
+
+  /// If this is a valid multi range specification.
+  bool isMulti() const;
+
+  /// Test if this contains at least one valid range.
+  bool hasRanges() const;
+
+  /// Test if this is a well formed range (may be empty).
+  bool isValid() const;
+
+  /// Test if this is a valid but empty range spec.
+  bool isEmpty() const;
+
+  /// Test if this is an unsatisfied range.
+  bool isUnsatisfied() const;
+
+  /// Access the range at index @a idx.
+  Range &operator[](int n);
+
+  /// Access the range at index @a idx.
+  Range const &operator[](int n) const;
+
+  /// Calculate the convex hull of the range spec.
+  /// The convex hull is the smallest single range that contains all of the ranges in the range spec.
+  /// @note This will return an invalid range if there are no ranges in the range spec.
+  /// @see HttpRangeSpec::Range::isValid
+  Range getConvexHull() const;
+
+  /** Calculate the content length for this range specification.
+
+      @note If a specific content length has not been @c apply 'd this will not produce
+      a usable result.
+
+      @return The content length for the ranges including the range separators.
+  */
+  uint64_t calcContentLength(uint64_t base_content_size, ///< Content size w/o ranges.
+                             uint64_t ct_val_len         ///< Length of Content-Type field value.
+                             ) const;
+
+  /// Calculate the length of the range part boundary header.
+  static uint64_t calcPartBoundarySize(uint64_t object_size ///< Base content size
+                                       ,
+                                       uint64_t ct_val_len ///< Length of the Content-Type value (0 if none).
+                                       );
+
+  /** Write the range part boundary to @a out.
+   */
+  static uint64_t writePartBoundary(MIOBuffer *out ///< Output IO Buffer
+                                    ,
+                                    char const *boundary_str ///< Boundary marker string.
+                                    ,
+                                    size_t boundary_len ///< Length of boundary marker string.
+                                    ,
+                                    uint64_t total_size ///< Base content size.
+                                    ,
+                                    uint64_t low ///< Low value for the range.
+                                    ,
+                                    uint64_t high ///< High value for the range.
+                                    ,
+                                    MIMEField *ctf ///< Content-Type field (@c NULL if none)
+                                    ,
+                                    bool final ///< Is this the final part boundary?
+                                    );
+
+  /// Iterator for first range.
+  iterator begin();
+  const_iterator begin() const;
+  /// Iterator past last range.
+  iterator end();
+  const_iterator end() const;
+
+  self &add(uint64_t low, uint64_t high);
+  self &add(Range const &r);
+};
+
 class IOBufferReader;
 
 class HTTPHdr : public MIMEHdr
@@ -631,6 +868,7 @@ public:
 
   const char *reason_get(int *length);
   void reason_set(const char *value, int length);
+  void reason_set(HTTPStatus status);
 
   MIMEParseResult parse_req(HTTPParser *parser, const char **start, const char *end, bool eof);
   MIMEParseResult parse_resp(HTTPParser *parser, const char **start, const char *end, bool eof);
@@ -1243,6 +1481,16 @@ HTTPHdr::reason_set(const char *value, int length)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
+inline void
+HTTPHdr::reason_set(HTTPStatus status)
+{
+  char const *phrase = http_hdr_reason_lookup(status);
+  this->reason_set(phrase, strlen(phrase));
+}
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+
 inline MIMEParseResult
 HTTPHdr::parse_req(HTTPParser *parser, const char **start, const char *end, bool eof)
 {
@@ -1316,34 +1564,114 @@ HTTPHdr::scheme_get(int *length)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
-enum {
-  CACHE_ALT_MAGIC_ALIVE = 0xabcddeed,
-  CACHE_ALT_MAGIC_MARSHALED = 0xdcbadeed,
-  CACHE_ALT_MAGIC_DEAD = 0xdeadeed,
-};
+enum { CACHE_ALT_MAGIC_ALIVE = 0xabcddeed, CACHE_ALT_MAGIC_MARSHALED = 0xdcbadeed, CACHE_ALT_MAGIC_DEAD = 0xdeadeed };
 
-// struct HTTPCacheAlt
+/// Header for an alternate of an object.
+/// This is close to a POD, all the real API is in the @c HTTPInfo class.
+/// @note THIS IS DIRECTLY SERIALIZED TO DISK
+/// (after some tweaks, but any member in this struct will be written to disk)
 struct HTTPCacheAlt {
+  /// Information about a fragment in this alternate.
+  /// @internal Currently @c Dir has only 40 bits for the disk offset of a fragment,
+  /// and since no object (or alternate) is split across stripes (and thence disks)
+  /// no fragment can have an internal offset more than 40 bits long, so 48 bits
+  /// should suffice here.
+  struct FragmentDescriptor {
+    CryptoHash m_key;       ///< Key for fragment.
+    uint64_t m_offset : 48; ///< Starting offset of fragment in object.
+    union {
+      uint16_t m_flags;
+      struct {
+        unsigned int cached_p : 1; ///< Presence bit (is fragment in cache?)
+        unsigned int zero : 15;    ///< Zero fill for future use.
+      } m_flag;
+    };
+  };
+
+  /** Holds the table of fragment descriptors.
+
+      @internal To avoid allocating 2 chunks of memory we hang the descriptors off the end of this structure and provide
+      a method to do the calculations. The @a m_size contains the number of descriptors, the actual byte size must be
+      computed from that. The count of valid entries is held in this structure, not in the table, because it makes
+      serialization easier.  We don't serialize the explicit contents of the table struct (e.g., the capacity / @a
+      m_size value) only the descriptors.
+  */
+  struct FragmentDescriptorTable {
+    /** The number of entries in the table.
+        Because this is a 1 based array, this is also the largest valid index.
+        @note It is 1 less than the total number of fragment descriptors because earliest is stored
+        directly and not in this table.
+     */
+    uint32_t m_n;
+
+    /** Fragment index of last initial segment cached.
+
+        All fragments from the earliest to this are in cache.
+
+        @note A simple effort to minimize the cost of detecting a complete object.
+        In the normal case we'll get all the fragments in order so this will roll along nicely.
+        Otherwise we may have to do a lot of work on a single fragment, but that' still better
+        than doing it every time for every fragment.
+    */
+    uint32_t m_cached_idx;
+
+    /** Array operator for fragments in the table (1-based).
+        This is a bit tricky. The earliest fragment is special and so is @b not stored in this table.
+        To make that easier to deal with this array is one based so the containing object can simply
+        pass the index on if it's not 0 (earliest). From an external point of view the array of fragments
+        is zero based.
+     */
+    FragmentDescriptor &operator[](int idx);
+    /// Calculate the allocation size needed for a maximum array index of @a n.
+    static size_t calc_size(uint32_t n);
+  };
+
   HTTPCacheAlt();
+
   void copy(HTTPCacheAlt *to_copy);
-  void copy_frag_offsets_from(HTTPCacheAlt *src);
   void destroy();
 
   uint32_t m_magic;
 
-  // Writeable is set to true is we reside
-  //  in a buffer owned by this structure.
-  // INVARIANT: if own the buffer this HttpCacheAlt
-  //   we also own the buffers for the request &
-  //   response headers
-  int32_t m_writeable;
+  union {
+    uint32_t m_flags;
+    struct {
+      /** Do we own our own buffer?
+          @c true if the buffer containing this data is owned by this object.
+          INVARIANT: if we own this buffer then we also own the buffers for
+          @a m_request_hdr and @a m_response_hdr.
+      */
+      uint32_t writeable_p : 1;
+      /// Was this alternate originally stored as a partial object?
+      uint32_t composite_p : 1;
+      /// Did the origin tell us the actual length of the object?
+      uint32_t content_length_p : 1;
+      /// Are all fragments in cache?
+      uint32_t complete_p : 1;
+      /// Is the fragment table independently allocated?
+      uint32_t table_allocated_p : 1;
+      // Note - !composite_p => complete_p
+      //      - complete_p => content_length_p
+    } m_flag;
+  };
+
   int32_t m_unmarshal_len;
 
   int32_t m_id;
   int32_t m_rid;
 
-  int32_t m_object_key[4];
-  int32_t m_object_size[2];
+  /// # of fragments in the alternate, including the earliest fragment.
+  /// This can be zero for a resident alternate.
+  /// @internal In practice this is the high water mark for cached fragments.
+  /// Contrast with the @a m_cached_idx in the fragment table - that marks the high
+  /// water of contiguously cached fragments.
+  uint32_t m_frag_count;
+
+  /** The target size for fragments in this alternate.
+      This is @b mandatory if the object is being partially cached.
+      During read it should be used as a guideline but not considered definitive.
+  */
+  uint32_t m_fixed_fragment_size;
 
   HTTPHdr m_request_hdr;
   HTTPHdr m_response_hdr;
@@ -1351,21 +1679,23 @@ struct HTTPCacheAlt {
   time_t m_request_sent_time;
   time_t m_response_received_time;
 
-  /// # of fragment offsets in this alternate.
-  /// @note This is one less than the number of fragments.
-  int m_frag_offset_count;
-  /// Type of offset for a fragment.
-  typedef uint64_t FragOffset;
-  /// Table of fragment offsets.
-  /// @note The offsets are forward looking so that frag[0] is the
-  /// first byte past the end of fragment 0 which is also the first
-  /// byte of fragment 1. For this reason there is no fragment offset
-  /// for the last fragment.
-  FragOffset *m_frag_offsets;
-  /// # of fragment offsets built in to object.
-  static int const N_INTEGRAL_FRAG_OFFSETS = 4;
-  /// Integral fragment offset table.
-  FragOffset m_integral_frag_offsets[N_INTEGRAL_FRAG_OFFSETS];
+  /** Special case the first (earliest, non-resident) fragment.
+      This holds the key for the earliest fragment and the object size
+      by overloading the offset in this specific instance.
+  */
+  FragmentDescriptor m_earliest;
+
+  /** Descriptors for the rest of the fragments.
+      Because of this, index 0 in this array is really the next fragment after the
+      earliest fragment. We should have the invariant
+      ( @a m_fragments != 0) == ( @a m_frag_count > 1 )
+
+      @internal I thought of using @c std::vector here, but then we end up with either
+      doing 2 allocations (one for the @c std::vector and another for its contents) or
+      writing the @c std::vector container to disk (because this struct is directly
+      serialized). Instead we do our own memory management, which doesn't make me happy either.
+  */
+  FragmentDescriptorTable *m_fragments;
 
   // With clustering, our alt may be in cluster
   //  incoming channel buffer, when we are
@@ -1380,7 +1710,8 @@ struct HTTPCacheAlt {
 class HTTPInfo
 {
 public:
-  typedef HTTPCacheAlt::FragOffset FragOffset; ///< Import type.
+  typedef HTTPCacheAlt::FragmentDescriptor FragmentDescriptor;           ///< Import type.
+  typedef HTTPCacheAlt::FragmentDescriptorTable FragmentDescriptorTable; ///< Import type.
 
   HTTPCacheAlt *m_alt;
 
@@ -1408,7 +1739,6 @@ public:
   {
     m_alt = info->m_alt;
   }
-  void copy_frag_offsets_from(HTTPInfo *src);
   HTTPInfo &operator=(const HTTPInfo &m);
 
   inkcoreapi int marshal_length();
@@ -1439,9 +1769,9 @@ public:
     m_alt->m_rid = id;
   }
 
-  INK_MD5 object_key_get();
-  void object_key_get(INK_MD5 *);
-  bool compare_object_key(const INK_MD5 *);
+  CryptoHash const &object_key_get();
+  void object_key_get(CryptoHash *);
+  bool compare_object_key(const CryptoHash *);
   int64_t object_size_get();
 
   void
@@ -1483,7 +1813,7 @@ public:
     return m_alt->m_response_received_time;
   }
 
-  void object_key_set(INK_MD5 &md5);
+  void object_key_set(CryptoHash const &md5);
   void object_size_set(int64_t size);
 
   void
@@ -1508,14 +1838,66 @@ public:
     m_alt->m_response_received_time = t;
   }
 
+  bool
+  is_composite() const
+  {
+    return m_alt->m_flag.composite_p;
+  }
+  bool
+  is_complete() const
+  {
+    return m_alt->m_flag.complete_p;
+  }
+  bool
+  is_writeable() const
+  {
+    return m_alt->m_flag.writeable_p;
+  }
+
+  /** Compute the convex hull of uncached ranges.
+
+      If the resulting range has a minimum that is less than @a initial @b and the earliest fragment
+      is not cached then the minimum will be changed to zero. Alternatively, the initial uncached
+      segment must be at least @a initial bytes long.
+
+      @return An invalid range if all of the request is available in cache.
+  */
+  HTTPRangeSpec::Range get_uncached_hull(HTTPRangeSpec const &req ///< [in] UA request with content length applied
+                                         ,
+                                         int64_t initial ///< Minimum size for uncached initial data
+                                         );
+
   /// Get the fragment table.
-  FragOffset *get_frag_table();
-  /// Get the # of fragment offsets
-  /// @note This is the size of the fragment offset table, and one less
-  /// than the actual # of fragments.
-  int get_frag_offset_count();
-  /// Add an @a offset to the end of the fragment offset table.
-  void push_frag_offset(FragOffset offset);
+  /// @note There is a fragment table only for multi-fragment alternates @b and
+  /// the indexing starts with the second (non-earliest) fragment.
+  /// @deprecated - use specialized methods.
+  FragmentDescriptorTable *get_frag_table();
+
+  /// Force a descriptor at index @a idx.
+  FragmentDescriptor *force_frag_at(unsigned int idx);
+
+  /// Get the fragment index for @a offset.
+  int get_frag_index_of(int64_t offset);
+  /// Get the fragment key for an @a offset.
+  /// @note Forces fragment.
+  CryptoHash const &get_frag_key_of(int64_t offset);
+  /// Get the fragment key of the @a idx fragment.
+  /// @note Forces fragment.
+  CryptoHash const &get_frag_key(unsigned int idx);
+  /// Get the starting offset of a fragment.
+  int64_t get_frag_offset(unsigned int idx);
+
+  /// Get the number of fragments.
+  /// 0 means resident alternate, 1 means single fragment, > 1 means multi-fragment.
+  int get_frag_count() const;
+  /// Get the target fragment size.
+  uint32_t get_frag_fixed_size() const;
+  /// Mark a fragment at index @a idx as written to cache.
+  void mark_frag_write(unsigned int idx);
+  /// Check if a fragment is cached.
+  bool is_frag_cached(unsigned int idx) const;
+  /// Get the range of bytes for the fragments from @a low to @a high.
+  HTTPRangeSpec::Range get_range_for_frags(int low, int high);
 
   // Sanity check functions
   static bool check_marshalled(char *buf, int len);
@@ -1528,7 +1910,7 @@ inline void
 HTTPInfo::destroy()
 {
   if (m_alt) {
-    if (m_alt->m_writeable) {
+    if (m_alt->m_flag.writeable_p) {
       m_alt->destroy();
     } else if (m_alt->m_ext_buffer) {
       if (m_alt->m_ext_buffer->refcount_dec() == 0) {
@@ -1545,77 +1927,307 @@ inline HTTPInfo &HTTPInfo::operator=(const HTTPInfo &m)
   return *this;
 }
 
-inline INK_MD5
+inline CryptoHash const &
 HTTPInfo::object_key_get()
 {
-  INK_MD5 val;
-  int32_t *pi = reinterpret_cast<int32_t *>(&val);
-
-  pi[0] = m_alt->m_object_key[0];
-  pi[1] = m_alt->m_object_key[1];
-  pi[2] = m_alt->m_object_key[2];
-  pi[3] = m_alt->m_object_key[3];
-
-  return val;
+  return m_alt->m_earliest.m_key;
 }
 
 inline void
-HTTPInfo::object_key_get(INK_MD5 *md5)
+HTTPInfo::object_key_get(CryptoHash *key)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(md5);
-  pi[0] = m_alt->m_object_key[0];
-  pi[1] = m_alt->m_object_key[1];
-  pi[2] = m_alt->m_object_key[2];
-  pi[3] = m_alt->m_object_key[3];
+  memcpy(key, &(m_alt->m_earliest.m_key), sizeof(*key));
 }
 
 inline bool
-HTTPInfo::compare_object_key(const INK_MD5 *md5)
+HTTPInfo::compare_object_key(const CryptoHash *key)
 {
-  int32_t const *pi = reinterpret_cast<int32_t const *>(md5);
-  return ((m_alt->m_object_key[0] == pi[0]) && (m_alt->m_object_key[1] == pi[1]) && (m_alt->m_object_key[2] == pi[2]) &&
-          (m_alt->m_object_key[3] == pi[3]));
+  return *key == m_alt->m_earliest.m_key;
 }
 
 inline int64_t
 HTTPInfo::object_size_get()
 {
-  int64_t val;
-  int32_t *pi = reinterpret_cast<int32_t *>(&val);
-
-  pi[0] = m_alt->m_object_size[0];
-  pi[1] = m_alt->m_object_size[1];
-  return val;
+  return m_alt->m_earliest.m_offset;
 }
 
 inline void
-HTTPInfo::object_key_set(INK_MD5 &md5)
+HTTPInfo::object_key_set(CryptoHash const &md5)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(&md5);
-  m_alt->m_object_key[0] = pi[0];
-  m_alt->m_object_key[1] = pi[1];
-  m_alt->m_object_key[2] = pi[2];
-  m_alt->m_object_key[3] = pi[3];
+  m_alt->m_earliest.m_key = md5;
 }
 
 inline void
 HTTPInfo::object_size_set(int64_t size)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(&size);
-  m_alt->m_object_size[0] = pi[0];
-  m_alt->m_object_size[1] = pi[1];
+  m_alt->m_earliest.m_offset = size;
+  m_alt->m_flag.content_length_p = true;
+  // Invariant - if a fragment is cached, all of that fragment is cached.
+  // Therefore if the last byte is in the initial cached fragments all of the data is cached.
+  if (!m_alt->m_flag.complete_p) {
+    int64_t mco = 0; // maximum cached offset + 1
+    if (m_alt->m_fragments) {
+      if (m_alt->m_fragments->m_cached_idx >= 0)
+        mco = this->get_frag_offset(m_alt->m_fragments->m_cached_idx) + this->get_frag_fixed_size();
+    } else if (m_alt->m_earliest.m_flag.cached_p) {
+      mco = this->get_frag_fixed_size();
+    }
+    if (mco > size)
+      m_alt->m_flag.complete_p = true;
+  }
 }
 
-inline HTTPInfo::FragOffset *
+inline HTTPInfo::FragmentDescriptorTable *
 HTTPInfo::get_frag_table()
 {
-  return m_alt ? m_alt->m_frag_offsets : 0;
+  return m_alt ? m_alt->m_fragments : 0;
 }
 
 inline int
-HTTPInfo::get_frag_offset_count()
+HTTPInfo::get_frag_count() const
+{
+  return m_alt ? m_alt->m_frag_count : 0;
+}
+
+inline uint32_t
+HTTPInfo::get_frag_fixed_size() const
+{
+  return m_alt ? m_alt->m_fixed_fragment_size : 0;
+}
+
+inline CryptoHash const &
+HTTPInfo::get_frag_key_of(int64_t offset)
+{
+  return this->get_frag_key(this->get_frag_index_of(offset));
+}
+
+inline CryptoHash const &
+HTTPInfo::get_frag_key(unsigned int idx)
+{
+  return 0 == idx ? m_alt->m_earliest.m_key : this->force_frag_at(idx)->m_key;
+}
+
+inline int64_t
+HTTPInfo::get_frag_offset(unsigned int idx)
+{
+  return 0 == idx ? 0 : (*m_alt->m_fragments)[idx].m_offset;
+}
+
+inline bool
+HTTPInfo::is_frag_cached(unsigned int idx) const
+{
+  return m_alt && ((0 == idx && m_alt->m_earliest.m_flag.cached_p) ||
+                   (m_alt->m_fragments && idx < m_alt->m_fragments->m_n && (*m_alt->m_fragments)[idx].m_flag.cached_p));
+}
+
+inline HTTPRangeSpec::HTTPRangeSpec() : _state(EMPTY)
+{
+}
+
+inline void
+HTTPRangeSpec::clear()
+{
+  _state = EMPTY;
+  RangeBox().swap(_ranges); // force memory drop.
+}
+
+inline bool
+HTTPRangeSpec::isSingle() const
+{
+  return SINGLE == _state;
+}
+
+inline bool
+HTTPRangeSpec::isMulti() const
+{
+  return MULTI == _state;
+}
+
+inline bool
+HTTPRangeSpec::isEmpty() const
+{
+  return EMPTY == _state;
+}
+
+inline bool
+HTTPRangeSpec::isUnsatisfied() const
+{
+  return UNSATISFIABLE == _state;
+}
+
+inline size_t
+HTTPRangeSpec::count() const
+{
+  return SINGLE == _state ? 1 : _ranges.size();
+}
+
+inline bool
+HTTPRangeSpec::hasRanges() const
+{
+  return SINGLE == _state || MULTI == _state;
+}
+
+inline bool
+HTTPRangeSpec::isValid() const
+{
+  return SINGLE == _state || MULTI == _state || EMPTY == _state;
+}
+
+inline HTTPRangeSpec::Range &
+HTTPRangeSpec::Range::invalidate()
+{
+  _min = UINT64_MAX;
+  _max = 1;
+  return *this;
+}
+
+inline bool
+HTTPRangeSpec::Range::isSuffix() const
+{
+  return 0 == _max && _min > 0;
+}
+
+inline bool
+HTTPRangeSpec::Range::isValid() const
+{
+  return _min <= _max || this->isSuffix();
+}
+
+inline uint64_t
+HTTPRangeSpec::Range::size() const
+{
+  return 1 + (_max - _min);
+}
+
+inline uint64_t
+HTTPRangeSpec::size() const
+{
+  uint64_t size = 0;
+  if (this->isSingle())
+    size = _single.size();
+  else if (this->isMulti()) {
+    for (RangeBox::const_iterator spot = _ranges.begin(), limit = _ranges.end(); spot != limit; ++spot)
+      size += spot->size();
+  }
+  return size;
+}
+
+inline bool
+HTTPRangeSpec::Range::apply(uint64_t len)
+{
+  ink_assert(len > 0);
+  bool zret = true; // is this range satisfiable for @a len?
+  if (this->isSuffix()) {
+    _max = len - 1;
+    _min = _min > len ? 0 : len - _min;
+  } else if (_min < len) {
+    _max = MIN(_max, len - 1);
+  } else {
+    this->invalidate();
+    zret = false;
+  }
+  return zret;
+}
+
+inline HTTPRangeSpec &
+HTTPRangeSpec::add(uint64_t low, uint64_t high)
+{
+  return this->add(Range(low, high));
+}
+
+inline HTTPRangeSpec::Range &HTTPRangeSpec::operator[](int n)
+{
+  return SINGLE == _state ? _single : _ranges[n];
+}
+
+inline HTTPRangeSpec::Range const &HTTPRangeSpec::operator[](int n) const
+{
+  return SINGLE == _state ? _single : _ranges[n];
+}
+
+inline HTTPRangeSpec::iterator
+HTTPRangeSpec::begin()
+{
+  switch (_state) {
+  case SINGLE:
+    return &_single;
+  case MULTI:
+    return &(*(_ranges.begin()));
+  default:
+    return NULL;
+  }
+}
+
+inline HTTPRangeSpec::iterator
+HTTPRangeSpec::end()
+{
+  switch (_state) {
+  case SINGLE:
+    return (&_single) + 1;
+  case MULTI:
+    return &(*(_ranges.end()));
+  default:
+    return NULL;
+  }
+}
+
+inline HTTPRangeSpec::const_iterator
+HTTPRangeSpec::begin() const
+{
+  return const_cast<self *>(this)->begin();
+}
+
+inline HTTPRangeSpec::const_iterator
+HTTPRangeSpec::end() const
+{
+  return const_cast<self *>(this)->end();
+}
+
+inline HTTPRangeSpec::Range
+HTTPRangeSpec::getConvexHull() const
+{
+  Range zret;
+  // Compute the convex hull of the original in fragment indices.
+  for (const_iterator spot = this->begin(), limit = this->end(); spot != limit; ++spot) {
+    if (spot->_min < zret._min)
+      zret._min = spot->_min;
+    if (spot->_max > zret._max)
+      zret._max = spot->_max;
+  }
+  return zret;
+}
+
+inline HTTPCacheAlt::FragmentDescriptor &HTTPCacheAlt::FragmentDescriptorTable::operator[](int idx)
+{
+  ink_assert(idx > 0);
+  return *(reinterpret_cast<FragmentDescriptor *>(reinterpret_cast<char *>(this + 1) + sizeof(FragmentDescriptor) * (idx - 1)));
+}
+
+inline size_t
+HTTPCacheAlt::FragmentDescriptorTable::calc_size(uint32_t n)
+{
+  return n < 1 ? 0 : sizeof(FragmentDescriptorTable) + n * sizeof(FragmentDescriptor);
+}
+
+#if 0
+inline
+HTTPCacheAlt::FragmentAccessor::FragmentAccessor(HTTPCacheAlt* alt)
+             : _alt(alt), _table(alt->m_fragments)
+{
+}
+
+inline HTTPCacheAlt::FragmentDescriptor&
+HTTPCacheAlt::FragmentAccessor::operator [] (int idx)
+{
+  ink_assert(idx >= 0);
+  return idx == 0 ? _alt->m_earliest : (*_table)[idx];
+}
+
+inline uint32_t
+HTTPCacheAlt::FragmentAccessor::get_initial_cached_index() const
 {
-  return m_alt ? m_alt->m_frag_offset_count : 0;
+  return _table ? _table->m_cached_idx : 0;
 }
+#endif
 
 #endif /* __HTTP_H__ */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/528eab64/proxy/http/HttpCacheSM.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpCacheSM.cc b/proxy/http/HttpCacheSM.cc
index c259b38..2ba4aee 100644
--- a/proxy/http/HttpCacheSM.cc
+++ b/proxy/http/HttpCacheSM.cc
@@ -267,6 +267,37 @@ HttpCacheSM::open_read(const HttpCacheKey *key, URL *url, HTTPHdr *hdr, CacheLoo
   }
 }
 
+int
+HttpCacheSM::state_cache_open_partial_read(int evid, void *data)
+{
+  if (!open_read_cb)
+    return this->state_cache_open_read(evid, data);
+  Debug("amc", "[HttpCacheSM::state_cache_open_partial_read] second round");
+  return VC_EVENT_DONE;
+}
+
+Action *
+HttpCacheSM::open_partial_read(HTTPHdr *client_request_hdr)
+{
+  // Simple because this requires an active write VC so we know the object is there (no retries).
+  ink_assert(NULL != cache_write_vc);
+
+  // If this is a partial fill there will be a cache read VC. Resetting it to be used is challenging
+  // because it requires digging in to the internals of the VC or expanding its interface. At present
+  // it's better to just close it and re-open one that we know is valid with regard to the write VC.
+  this->close_read();
+
+  SET_HANDLER(&HttpCacheSM::state_cache_open_partial_read);
+  open_read_cb = false;
+
+  Action *action_handle = cacheProcessor.open_read(this, cache_write_vc, client_request_hdr);
+
+  if (action_handle != ACTION_RESULT_DONE)
+    pending_action = action_handle;
+
+  return open_read_cb ? ACTION_RESULT_DONE : &captive_action;
+}
+
 Action *
 HttpCacheSM::open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache,
                         bool retry, bool allow_multiple)


Mime
View raw message