jackrabbit-oak-issues mailing list archives

From "Thomas Mueller (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (OAK-3168) SegmentCache flushes Segment on update
Date Thu, 30 Jul 2015 14:28:04 GMT

    [ https://issues.apache.org/jira/browse/OAK-3168?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14647676#comment-14647676 ]

Thomas Mueller commented on OAK-3168:
-------------------------------------

Possible patch (just for the cache; no tests yet, and SegmentCache not yet changed):


{noformat}
--- src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java	(revision 1692465)
+++ src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java	(working copy)
@@ -34,6 +34,7 @@
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.CacheStats;
 import com.google.common.cache.LoadingCache;
+import com.google.common.cache.RemovalCause;
 import com.google.common.cache.Weigher;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -93,10 +94,11 @@
          * The method may be called twice for the same key (first if the entry
          * is resident, and later if the entry is non-resident).
          * 
+         * @param cause the removal cause
          * @param key the evicted item's key
          * @param value the evicted item's value or {@code null} if non-resident
          */
-        void evicted(@Nonnull K key, @Nullable V value);
+        void evicted(RemovalCause cause, @Nonnull K key, @Nullable V value);
     }
 
     /**
@@ -205,13 +207,13 @@
         }
     }
 
-    void evicted(Entry<K, V> entry) {
+    void evicted(RemovalCause cause, Entry<K, V> entry) {
         if (evicted == null) {
             return;
         }
         K key = entry.key;
         if (key != null) {
-            evicted.evicted(key, entry.value);
+            evicted.evicted(cause, key, entry.value);
         }
     }
 
@@ -300,9 +302,18 @@
      * @throws ExecutionException
      */
     @Override
-    public V get(K key) throws ExecutionException {
+    public V get(final K key) throws ExecutionException {
         int hash = getHash(key);
-        return getSegment(hash).get(key, hash, loader);
+        if (loader != null) {
+            return getSegment(hash).get(key, hash, 
+                    new Callable<V>() {
+                        @Override
+                        public V call() throws Exception {
+                            return loader.load(key);
+                        }
+                });            
+        }
+        return getSegment(hash).get(key, hash);
     }
 
     /**
@@ -383,7 +394,7 @@
     @Override
     public void invalidate(Object key) {
         int hash = getHash(key);
-        getSegment(hash).invalidate(key, hash);
+        getSegment(hash).invalidate(RemovalCause.EXPLICIT, key, hash);
     }
 
     /**
@@ -777,16 +788,16 @@
         public void evictedAll() {
             for (Entry<K, V> e = stack.stackNext; e != stack; e = e.stackNext) {
                 if (e.value != null) {
-                    cache.evicted(e);
+                    cache.evicted(RemovalCause.EXPLICIT, e);
                 }
             }
             for (Entry<K, V> e = queue.queueNext; e != queue; e = e.queueNext) {
                 if (e.stackNext == null) {
-                    cache.evicted(e);
+                    cache.evicted(RemovalCause.EXPLICIT, e);
                 }
             }
             for (Entry<K, V> e = queue2.queueNext; e != queue2; e = e.queueNext) {
-                cache.evicted(e);
+                cache.evicted(RemovalCause.EXPLICIT, e);
             }
         }
 
@@ -993,36 +1004,6 @@
             return value;
         }
 
-        V get(K key, int hash, CacheLoader<K, V> loader) throws ExecutionException {
-            // avoid synchronization if it's in the cache
-            V value = get(key, hash);
-            if (value != null) {
-                return value;
-            }
-            if (loader == null) {
-                return null;
-            }
-            synchronized (this) {
-                value = get(key, hash);
-                if (value != null) {
-                    return value;
-                }
-                long start = System.nanoTime();
-                try {
-                    value = loader.load(key);
-                    loadSuccessCount++;
-                } catch (Exception e) {
-                    loadExceptionCount++;
-                    throw new ExecutionException(e);
-                } finally {
-                    long time = System.nanoTime() - start;
-                    totalLoadTime += time;
-                }
-                put(key, hash, value, cache.sizeOf(key, value));
-                return value;
-            }
-        }
-
         synchronized V replace(K key, int hash, V value, int memory) {
             if (containsKey(key, hash)) {
                 return put(key, hash, value, memory);
@@ -1042,7 +1023,7 @@
         synchronized boolean remove(Object key, int hash, Object value) {
             V old = get(key, hash);
             if (old != null && old.equals(value)) {
-                invalidate(key, hash);
+                invalidate(RemovalCause.EXPLICIT, key, hash);
                 return true;
             }
             return false;
@@ -1051,7 +1032,7 @@
         synchronized V remove(Object key, int hash) {
             V old = get(key, hash);
             // even if old is null, there might still be a cold entry
-            invalidate(key, hash);
+            invalidate(RemovalCause.EXPLICIT, key, hash);
             return old;
         }
 
@@ -1111,7 +1092,7 @@
                 old = null;
             } else {
                 old = e.value;
-                invalidate(key, hash);
+                invalidate(RemovalCause.REPLACED, key, hash);
             }
             e = new Entry<K, V>();
             e.key = key;
@@ -1137,10 +1118,11 @@
          * Remove an entry. Both resident and non-resident entries can be
          * removed.
          *
+         * @param cause the removal cause
          * @param key the key (may not be null)
          * @param hash the hash
          */
-        synchronized void invalidate(Object key, int hash) {
+        synchronized void invalidate(RemovalCause cause, Object key, int hash) {
             Entry<K, V>[] array = entries;
             int mask = array.length - 1;
             int index = hash & mask;
@@ -1180,7 +1162,7 @@
                 removeFromQueue(e);
             }
             pruneStack();
-            cache.evicted(e);
+            cache.evicted(cause, e);
         }
 
         /**
@@ -1208,7 +1190,7 @@
                 usedMemory -= e.memory;
                 evictionCount++;
                 removeFromQueue(e);
-                cache.evicted(e);
+                cache.evicted(RemovalCause.SIZE, e);
                 e.value = null;
                 e.memory = 0;
                 addToQueue(queue2, e);
@@ -1216,7 +1198,7 @@
                 while (queue2Size + queue2Size > stackSize) {
                     e = queue2.queuePrev;
                     int hash = getHash(e.key);
-                    invalidate(e.key, hash);
+                    invalidate(RemovalCause.SIZE, e.key, hash);
                 }
             }
         }

{noformat}
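
For illustration only (not part of the patch), this is roughly how a consumer in the style of the SegmentTracker eviction callback [0] could use the new RemovalCause parameter to stop flushing the Segment on a plain update. The builder wiring, the cacheSize variable and the SegmentId.setSegment() call below are assumptions sketched from [0], not the actual Oak code:

{noformat}
// Hedged sketch: an eviction callback using the new RemovalCause parameter.
// Builder wiring, cacheSize and SegmentId.setSegment() are assumptions.
CacheLIRS<SegmentId, Segment> segments = CacheLIRS.<SegmentId, Segment>newBuilder()
        .maximumWeight(cacheSize)
        .evictionCallback(new EvictionCallback<SegmentId, Segment>() {
            @Override
            public void evicted(RemovalCause cause, SegmentId id, Segment segment) {
                // With the patch, put() on an existing key reports REPLACED,
                // so the in-memory reference is only cleared on real evictions
                // (SIZE) and explicit invalidation, not on every update.
                if (segment != null && cause != RemovalCause.REPLACED) {
                    id.setSegment(null);
                }
            }
        })
        .build();
{noformat}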

> SegmentCache flushes Segment on update
> --------------------------------------
>
>                 Key: OAK-3168
>                 URL: https://issues.apache.org/jira/browse/OAK-3168
>             Project: Jackrabbit Oak
>          Issue Type: Bug
>          Components: segmentmk
>            Reporter: Alex Parvulescu
>            Assignee: Alex Parvulescu
>             Fix For: 1.3.5
>
>
> The SegmentCache currently uses the cache eviction call to remove the Segment instance from memory, to help keep the cache's memory requirements under control [0].
> What I've noticed, though, is that a cache update (existing key) also triggers an eviction call. This puts a lot of extra IO pressure on the SegmentStore, which is not only unable to cache the segment but is forced to reload it multiple times, as the reference gets nullified after each load.
> This comes from the sampling behavior of the SegmentId: it will not hit the cache each time it needs to load a new Segment, but rather load it from IO and (re)place it in the cache, based on a sampling rate [1].
> Now I see 2 options:
>  * change the cache code to _not_ call the eviction callback on updates (or allow disabling this call on updates)
>  * change the SegmentTracker code to add the value to the cache only if it's not already there; as Segments are immutable, no harm done.
> I raised this issue offline with [~tmueller] and [~mduerig] first. As I understand it, [~mduerig] is in favor of option one, while [~tmueller] proposed that the LIRS cache implementation should be in line with what the Guava cache does, and depending on that we could choose the right fix here.

> Hope this covers everything.
> [0] https://github.com/apache/jackrabbit-oak/blob/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java#L133
> [1] https://github.com/apache/jackrabbit-oak/blob/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java#L135
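
As a rough sketch of option two above (the method name and the "segments" cache field are assumptions, not the actual SegmentTracker code): only put the Segment into the cache when no entry exists yet, so an update never triggers the eviction callback; since Segments are immutable, keeping the instance that is already cached is safe.

{noformat}
// Hedged sketch of option two; "segments" stands for the CacheLIRS instance
// in SegmentTracker and cacheSegment() for the code path that currently does
// an unconditional put() -- both names are illustrative.
void cacheSegment(SegmentId id, Segment segment) {
    // Only insert when the key is absent, so put() never replaces an existing
    // entry and the eviction callback is never fired for a mere update.
    // The check-then-put is not atomic, but Segments are immutable, so a rare
    // duplicate put is harmless.
    if (segments.getIfPresent(id) == null) {
        segments.put(id, segment);
    }
}
{noformat}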



