trafficserver-commits mailing list archives

From: zw...@apache.org
Subject: [trafficserver] 02/03: Revert "Revert "TS-4612: Proposal: InactivityCop Optimize""
Date: Tue, 25 Apr 2017 14:03:26 GMT
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 7.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit fd008c3a4035ff3b7c70b877c47c5ac3f45b1667
Author: Leif Hedstrom <zwoop@apache.org>
AuthorDate: Mon Apr 24 15:17:10 2017 -0600

    Revert "Revert "TS-4612: Proposal: InactivityCop Optimize""
    
    This reverts commit db596a1c63e1cc86cfd066ed08134750dabe7d66.
---
 iocore/cluster/ClusterHandlerBase.cc |  1 +
 iocore/net/P_UnixNet.h               |  5 ++-
 iocore/net/P_UnixNetVConnection.h    | 41 ++----------------
 iocore/net/UnixNet.cc                | 80 ++++++++++++------------------------
 iocore/net/UnixNetAccept.cc          |  1 +
 iocore/net/UnixNetVConnection.cc     | 37 ++++++++++++++++-
 6 files changed, 70 insertions(+), 95 deletions(-)
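
For context, the change restored here makes NetHandler drop a NetVC from cop_list as soon as it is
triggered, so InactivityCop only walks the connections that saw no activity since its last run and
then refills cop_list from open_list. A rough sketch of that scheme follows, using simplified
stand-in types (SimpleVC, SimpleNetHandler) rather than the real ATS classes:

    // Rough sketch only: SimpleVC and SimpleNetHandler are stand-ins, not the ATS types.
    #include <cstdint>
    #include <list>

    struct SimpleVC {
      int64_t next_inactivity_timeout_at = 0; // absolute deadline, 0 = not yet set
    };

    struct SimpleNetHandler {
      std::list<SimpleVC *> open_list; // every open connection on this thread
      std::list<SimpleVC *> cop_list;  // connections with no activity since the last cop run

      // Event loop side: a triggered connection cannot time out before the next
      // cop run, so it is dropped from cop_list right away.
      void on_triggered(SimpleVC *vc) { cop_list.remove(vc); }

      // Cop side, roughly once per second: only the leftovers in cop_list need a
      // timeout check, then cop_list is refilled from open_list for the next interval.
      void check_inactivity(int64_t now) {
        while (!cop_list.empty()) {
          SimpleVC *vc = cop_list.front();
          cop_list.pop_front();
          if (vc->next_inactivity_timeout_at != 0 && vc->next_inactivity_timeout_at < now) {
            // the real code signals EVENT_IMMEDIATE so the NetVC closes itself
          }
        }
        for (SimpleVC *vc : open_list) {
          cop_list.push_back(vc);
        }
      }
    };

Compared with the old behavior, where the cop copied all of open_list at the start of every run,
the per-second scan now only touches connections that stayed idle for the whole interval.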

diff --git a/iocore/cluster/ClusterHandlerBase.cc b/iocore/cluster/ClusterHandlerBase.cc
index 41da0a5..6202a41 100644
--- a/iocore/cluster/ClusterHandlerBase.cc
+++ b/iocore/cluster/ClusterHandlerBase.cc
@@ -1018,6 +1018,7 @@ ClusterHandler::startClusterEvent(int event, Event *e)
       if (lock.is_locked() && lock1.is_locked()) {
         vc->ep.stop();
         vc->nh->open_list.remove(vc);
+        vc->nh->cop_list.remove(vc);
         vc->thread = nullptr;
         if (vc->nh->read_ready_list.in(vc))
           vc->nh->read_ready_list.remove(vc);
diff --git a/iocore/net/P_UnixNet.h b/iocore/net/P_UnixNet.h
index 4637f00..f514ce8 100644
--- a/iocore/net/P_UnixNet.h
+++ b/iocore/net/P_UnixNet.h
@@ -195,6 +195,7 @@ public:
   uint32_t inactive_threashold_in;
   uint32_t transaction_no_activity_timeout_in;
   uint32_t keep_alive_no_activity_timeout_in;
+  uint32_t default_inactivity_timeout;
 
   int startNetEvent(int event, Event *data);
   int mainNetEvent(int event, Event *data);
@@ -406,7 +407,7 @@ read_disable(NetHandler *nh, UnixNetVConnection *vc)
   }
 #else
   if (!vc->write.enabled) {
-    vc->next_inactivity_timeout_at = 0;
+    vc->set_inactivity_timeout(0);
    Debug("socket", "read_disable updating inactivity_at %" PRId64 ", NetVC=%p", vc->next_inactivity_timeout_at, vc);
   }
 #endif
@@ -427,7 +428,7 @@ write_disable(NetHandler *nh, UnixNetVConnection *vc)
   }
 #else
   if (!vc->read.enabled) {
-    vc->next_inactivity_timeout_at = 0;
+    vc->set_inactivity_timeout(0);
    Debug("socket", "write_disable updating inactivity_at %" PRId64 ", NetVC=%p", vc->next_inactivity_timeout_at, vc);
   }
 #endif
diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h
index 9a238e0..e5b5d25 100644
--- a/iocore/net/P_UnixNetVConnection.h
+++ b/iocore/net/P_UnixNetVConnection.h
@@ -370,41 +370,6 @@ UnixNetVConnection::get_inactivity_timeout()
 }
 
 TS_INLINE void
-UnixNetVConnection::set_inactivity_timeout(ink_hrtime timeout_in)
-{
-  Debug("socket", "Set inactive timeout=%" PRId64 ", for NetVC=%p", timeout_in, this);
-  inactivity_timeout_in = timeout_in;
-#ifdef INACTIVITY_TIMEOUT
-
-  if (inactivity_timeout)
-    inactivity_timeout->cancel_action(this);
-  if (inactivity_timeout_in) {
-    if (read.enabled) {
-      ink_assert(read.vio.mutex->thread_holding == this_ethread() && thread);
-      if (read.vio.mutex->thread_holding == thread)
-        inactivity_timeout = thread->schedule_in_local(this, inactivity_timeout_in);
-      else
-        inactivity_timeout = thread->schedule_in(this, inactivity_timeout_in);
-    } else if (write.enabled) {
-      ink_assert(write.vio.mutex->thread_holding == this_ethread() && thread);
-      if (write.vio.mutex->thread_holding == thread)
-        inactivity_timeout = thread->schedule_in_local(this, inactivity_timeout_in);
-      else
-        inactivity_timeout = thread->schedule_in(this, inactivity_timeout_in);
-    } else
-      inactivity_timeout = 0;
-  } else
-    inactivity_timeout = 0;
-#else
-  if (timeout_in) {
-    next_inactivity_timeout_at = Thread::get_hrtime() + timeout_in;
-  } else {
-    next_inactivity_timeout_at = 0;
-  }
-#endif
-}
-
-TS_INLINE void
 UnixNetVConnection::set_active_timeout(ink_hrtime timeout_in)
 {
   Debug("socket", "Set active timeout=%" PRId64 ", NetVC=%p", timeout_in, this);
@@ -430,7 +395,7 @@ UnixNetVConnection::set_active_timeout(ink_hrtime timeout_in)
   } else
     active_timeout = 0;
 #else
-  next_activity_timeout_at   = Thread::get_hrtime() + timeout_in;
+  next_activity_timeout_at = Thread::get_hrtime() + timeout_in;
 #endif
 }
 
@@ -446,7 +411,7 @@ UnixNetVConnection::cancel_inactivity_timeout()
     inactivity_timeout = nullptr;
   }
 #else
-  next_inactivity_timeout_at = 0;
+  set_inactivity_timeout(0);
 #endif
 }
 
@@ -462,7 +427,7 @@ UnixNetVConnection::cancel_active_timeout()
     active_timeout = nullptr;
   }
 #else
-  next_activity_timeout_at   = 0;
+  next_activity_timeout_at = 0;
 #endif
 }
 
diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc
index d13badc..46d8f75 100644
--- a/iocore/net/UnixNet.cc
+++ b/iocore/net/UnixNet.cc
@@ -38,20 +38,10 @@ extern "C" void fd_reify(struct ev_loop *);
 // INKqa10496
 // One Inactivity cop runs on each thread once every second and
 // loops through the list of NetVCs and calls the timeouts
-int update_cop_config(const char *name, RecDataT data_type, RecData data, void *cookie);
-
 class InactivityCop : public Continuation
 {
 public:
-  explicit InactivityCop(Ptr<ProxyMutex> &m) : Continuation(m.get()), default_inactivity_timeout(0)
-  {
-    SET_HANDLER(&InactivityCop::check_inactivity);
-    REC_ReadConfigInteger(default_inactivity_timeout, "proxy.config.net.default_inactivity_timeout");
-    Debug("inactivity_cop", "default inactivity timeout is set to: %d", default_inactivity_timeout);
-
-    RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", update_cop_config, (void *)this);
-  }
-
+  explicit InactivityCop(Ptr<ProxyMutex> &m) : Continuation(m.get()) { SET_HANDLER(&InactivityCop::check_inactivity); }
   int
   check_inactivity(int event, Event *e)
   {
@@ -60,13 +50,8 @@ public:
     NetHandler &nh = *get_NetHandler(this_ethread());
 
     Debug("inactivity_cop_check", "Checking inactivity on Thread-ID #%d", this_ethread()->id);
-    // Copy the list and use pop() to catch any closes caused by callbacks.
-    forl_LL(UnixNetVConnection, vc, nh.open_list)
-    {
-      if (vc->thread == this_ethread()) {
-        nh.cop_list.push(vc);
-      }
-    }
+    // The NetVCs still in cop_list are those that were not triggered between InactivityCop runs.
+    // Use pop() to catch any closes caused by callbacks.
     while (UnixNetVConnection *vc = nh.cop_list.pop()) {
       // If we cannot get the lock don't stop just keep cleaning
       MUTEX_TRY_LOCK(lock, vc->mutex, this_ethread());
@@ -80,17 +65,6 @@ public:
         continue;
       }
 
-      // set a default inactivity timeout if one is not set
-      if (vc->next_inactivity_timeout_at == 0 && default_inactivity_timeout > 0) {
-        Debug("inactivity_cop", "vc: %p inactivity timeout not set, setting a default of %d", vc, default_inactivity_timeout);
-        vc->set_inactivity_timeout(HRTIME_SECONDS(default_inactivity_timeout));
-        NET_INCREMENT_DYN_STAT(default_inactivity_timeout_stat);
-      } else {
-        Debug("inactivity_cop_verbose", "vc: %p now: %" PRId64 " timeout at: %" PRId64 " timeout in: %" PRId64, vc,
-              ink_hrtime_to_sec(now), ink_hrtime_to_sec(vc->next_inactivity_timeout_at),
-              ink_hrtime_to_sec(vc->inactivity_timeout_in));
-      }
-
      if (vc->next_inactivity_timeout_at && vc->next_inactivity_timeout_at < now) {
         if (nh.keep_alive_queue.in(vc)) {
          // only stat if the connection is in keep-alive, there can be other inactivity timeouts
@@ -103,6 +77,18 @@ public:
         vc->handleEvent(EVENT_IMMEDIATE, e);
       }
     }
+    // The cop_list is empty now.
+    // Let's reload the cop_list from open_list again.
+    forl_LL(UnixNetVConnection, vc, nh.open_list)
+    {
+      if (vc->thread == this_ethread()) {
+        nh.cop_list.push(vc);
+      }
+    }
+    // NetHandler removes a NetVC from cop_list whenever it is triggered.
+    // As NetHandler runs, the number of NetVCs in cop_list keeps shrinking.
+    // NetHandler runs at most 100 times between InactivityCop runs.
+    // Therefore we do not have to check nearly as many NetVCs as are in open_list.
 
     // Cleanup the active and keep-alive queues periodically
     nh.manage_active_queue(true); // close any connections over the active timeout
@@ -110,33 +96,8 @@ public:
 
     return 0;
   }
-
-  void
-  set_default_timeout(const int x)
-  {
-    default_inactivity_timeout = x;
-  }
-
-private:
-  int default_inactivity_timeout; // only used when one is not set for some bad reason
 };
 
-int
-update_cop_config(const char *name, RecDataT data_type ATS_UNUSED, RecData data, void *cookie)
-{
-  InactivityCop *cop = static_cast<InactivityCop *>(cookie);
-  ink_assert(cop != NULL);
-
-  if (cop != NULL) {
-    if (strcmp(name, "proxy.config.net.default_inactivity_timeout") == 0) {
-      Debug("inactivity_cop_dynamic", "proxy.config.net.default_inactivity_timeout updated to %" PRId64, data.rec_int);
-      cop->set_default_timeout(data.rec_int);
-    }
-  }
-
-  return REC_ERR_OKAY;
-}
-
 #endif
 
 PollCont::PollCont(Ptr<ProxyMutex> &m, int pt)
@@ -336,6 +297,10 @@ update_nethandler_config(const char *name, RecDataT data_type ATS_UNUSED, RecDat
      Debug("net_queue", "proxy.config.net.keep_alive_no_activity_timeout_in updated to %" PRId64, data.rec_int);
       nh->keep_alive_no_activity_timeout_in = data.rec_int;
     }
+    if (strcmp(name, "proxy.config.net.default_inactivity_timeout") == 0) {
+      Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to %" PRId64, data.rec_int);
+      nh->default_inactivity_timeout = data.rec_int;
+    }
   }
 
   if (update_per_thread_configuration == true) {
@@ -358,18 +323,21 @@ NetHandler::startNetEvent(int event, Event *e)
   REC_ReadConfigInt32(inactive_threashold_in, "proxy.config.net.inactive_threashold_in");
   REC_ReadConfigInt32(transaction_no_activity_timeout_in, "proxy.config.net.transaction_no_activity_timeout_in");
   REC_ReadConfigInt32(keep_alive_no_activity_timeout_in, "proxy.config.net.keep_alive_no_activity_timeout_in");
+  REC_ReadConfigInt32(default_inactivity_timeout, "proxy.config.net.default_inactivity_timeout");
 
  RecRegisterConfigUpdateCb("proxy.config.net.max_connections_in", update_nethandler_config, (void *)this);
  RecRegisterConfigUpdateCb("proxy.config.net.max_active_connections_in", update_nethandler_config, (void *)this);
  RecRegisterConfigUpdateCb("proxy.config.net.inactive_threashold_in", update_nethandler_config, (void *)this);
  RecRegisterConfigUpdateCb("proxy.config.net.transaction_no_activity_timeout_in", update_nethandler_config, (void *)this);
  RecRegisterConfigUpdateCb("proxy.config.net.keep_alive_no_activity_timeout_in", update_nethandler_config, (void *)this);
+  RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", update_nethandler_config, (void *)this);
 
   Debug("net_queue", "proxy.config.net.max_connections_in updated to %d", max_connections_in);
   Debug("net_queue", "proxy.config.net.max_active_connections_in updated to %d", max_connections_active_in);
   Debug("net_queue", "proxy.config.net.inactive_threashold_in updated to %d", inactive_threashold_in);
  Debug("net_queue", "proxy.config.net.transaction_no_activity_timeout_in updated to %d", transaction_no_activity_timeout_in);
  Debug("net_queue", "proxy.config.net.keep_alive_no_activity_timeout_in updated to %d", keep_alive_no_activity_timeout_in);
+  Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to %d", default_inactivity_timeout);
 
   configure_per_thread();
 
@@ -477,6 +445,10 @@ NetHandler::mainNetEvent(int event, Event *e)
     epd = (EventIO *)get_ev_data(pd, x);
     if (epd->type == EVENTIO_READWRITE_VC) {
       vc = epd->data.vc;
+      // Remove the triggered NetVC from cop_list because it cannot time out before the next InactivityCop runs.
+      if (cop_list.in(vc)) {
+        cop_list.remove(vc);
+      }
       if (get_ev_events(pd, x) & EVENTIO_READ) {
         vc->read.triggered = 1;
         if (get_ev_events(pd, x) & EVENTIO_ERROR) {
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index a3094fb..d30ebe5 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -428,6 +428,7 @@ NetAccept::acceptFastEvent(int event, void *ep)
     }
 
     ink_assert(vc->nh->mutex->thread_holding == this_ethread());
+    vc->set_inactivity_timeout(0);
     vc->nh->open_list.enqueue(vc);
 
 #ifdef USE_EDGE_TRIGGER
diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc
index ae9f10c..81bfde2 100644
--- a/iocore/net/UnixNetVConnection.cc
+++ b/iocore/net/UnixNetVConnection.cc
@@ -1205,6 +1205,7 @@ UnixNetVConnection::acceptEvent(int event, Event *e)
     return EVENT_DONE;
   }
 
+  set_inactivity_timeout(0);
   nh->open_list.enqueue(this);
 
 #ifdef USE_EDGE_TRIGGER
@@ -1419,9 +1420,9 @@ UnixNetVConnection::connectUp(EThread *t, int fd)
   SET_HANDLER(&UnixNetVConnection::mainEvent);
 
   nh = get_NetHandler(t);
+  set_inactivity_timeout(0);
   nh->open_list.enqueue(this);
 
-  ink_assert(!inactivity_timeout_in);
   ink_assert(!active_timeout_in);
   this->set_local_addr();
   action_.continuation->handleEvent(NET_EVENT_OPEN, this);
@@ -1484,6 +1485,40 @@ UnixNetVConnection::apply_options()
   con.apply_options(options);
 }
 
+TS_INLINE void
+UnixNetVConnection::set_inactivity_timeout(ink_hrtime timeout_in)
+{
+  Debug("socket", "Set inactive timeout=%" PRId64 ", for NetVC=%p", timeout_in, this);
+#ifdef INACTIVITY_TIMEOUT
+  if (inactivity_timeout)
+    inactivity_timeout->cancel_action(this);
+  if (timeout_in) {
+    inactivity_timeout_in = timeout_in;
+    if (read.enabled) {
+      ink_assert(read.vio.mutex->thread_holding == this_ethread() && thread);
+      if (read.vio.mutex->thread_holding == thread)
+        inactivity_timeout = thread->schedule_in_local(this, inactivity_timeout_in);
+      else
+        inactivity_timeout = thread->schedule_in(this, inactivity_timeout_in);
+    } else if (write.enabled) {
+      ink_assert(write.vio.mutex->thread_holding == this_ethread() && thread);
+      if (write.vio.mutex->thread_holding == thread)
+        inactivity_timeout = thread->schedule_in_local(this, inactivity_timeout_in);
+      else
+        inactivity_timeout = thread->schedule_in(this, inactivity_timeout_in);
+    } else
+      inactivity_timeout = 0;
+  } else
+    inactivity_timeout = 0;
+#else
+  if (timeout_in == 0) {
+    // set default inactivity timeout
+    inactivity_timeout_in = timeout_in = HRTIME_SECONDS(nh->default_inactivity_timeout);
+  }
+  next_inactivity_timeout_at = Thread::get_hrtime() + timeout_in;
+#endif
+}
+
 /*
  * Close down the current netVC.  Save aside the socket and SSL information
  * and create new netVC in the current thread/netVC
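
In the non-INACTIVITY_TIMEOUT build, the relocated set_inactivity_timeout() above treats a
timeout_in of 0 as "use the NetHandler's configured default" (proxy.config.net.default_inactivity_timeout)
rather than "no timeout", which is why acceptEvent(), acceptFastEvent() and connectUp() now call
set_inactivity_timeout(0) when they put a NetVC on open_list. A minimal sketch of that fallback
path, again with stand-in names (SimpleVC, SimpleNetHandlerCfg) and an explicit clock argument
instead of the real ATS types:

    // Sketch only: stand-in types and an explicit "now" parameter, not the ATS implementation.
    #include <cstdint>

    constexpr int64_t kHRTimeSecond = 1000LL * 1000 * 1000; // hrtime values are nanoseconds

    struct SimpleNetHandlerCfg {
      uint32_t default_inactivity_timeout = 0; // loaded from proxy.config.net.default_inactivity_timeout
    };

    struct SimpleVC {
      SimpleNetHandlerCfg *nh = nullptr;
      int64_t inactivity_timeout_in = 0;
      int64_t next_inactivity_timeout_at = 0;

      void set_inactivity_timeout(int64_t timeout_in, int64_t now_hrtime) {
        if (timeout_in == 0) {
          // fall back to the configured default instead of clearing the timeout
          timeout_in = static_cast<int64_t>(nh->default_inactivity_timeout) * kHRTimeSecond;
          inactivity_timeout_in = timeout_in;
        }
        next_inactivity_timeout_at = now_hrtime + timeout_in;
      }
    };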

-- 
To stop receiving notification emails like this one, please contact
"commits@trafficserver.apache.org" <commits@trafficserver.apache.org>.
