From 027a5816287e4f268c1ecc3c8aa4ee249a093462 Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:41 +0100
Subject: [PATCH 1/6] Remove effectively unused archive restart code in
 ArchiveManager

The `lastSize` and `lastHash` on the ArchiveStoreContext appear to keep
track of the last observed size and hash for an archive data bucket, but
they are only ever written by the ArchiveManager when they already have
a value - which they never do. Therefore, there are no code paths for
ArchiveRestartException to be thrown.

This has been the case since at least 2005, and apparently it works fine
this way. Inline the default values of these fields in ArchiveManager
and prune the now unreachable code.
---
 src/freenet/client/ArchiveManager.java | 40 ++++----------------------
 1 file changed, 6 insertions(+), 34 deletions(-)

diff --git a/src/freenet/client/ArchiveManager.java b/src/freenet/client/ArchiveManager.java
index 911c50f3db7..aab4dd97f2f 100644
--- a/src/freenet/client/ArchiveManager.java
+++ b/src/freenet/client/ArchiveManager.java
@@ -9,7 +9,6 @@
 import java.io.OutputStream;
 import java.io.PipedInputStream;
 import java.io.PipedOutputStream;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Set;
@@ -33,7 +32,6 @@
 import freenet.support.compress.CompressionOutputSizeException;
 import freenet.support.compress.Compressor;
 import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
-import freenet.support.io.BucketTools;
 import freenet.support.io.Closer;
 import freenet.support.io.SkipShieldingInputStream;
 import net.contrapunctus.lzma.LzmaInputStream;
@@ -250,39 +248,15 @@ synchronized void removeCachedItem(ArchiveStoreItem item) {
     * @param element A particular element that the caller is especially interested in, or null.
     * @param callback A callback to be called if we find that element, or if we don't.
     * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
-    * @throws ArchiveRestartException
-    * @throws ArchiveRestartException If the request needs to be restarted because the archive
-    * changed.
     */
-   public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
+   public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
        logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

        MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

        if(logMINOR) Logger.minor(this, "Extracting "+key);
        ctx.removeAllCachedItems(this); // flush cache anyway
-       final long expectedSize = ctx.getLastSize();
        final long archiveSize = data.size();
-       /** Set if we need to throw a RestartedException rather than returning success,
-        * after we have unpacked everything.
-        */
-       boolean throwAtExit = false;
-       if((expectedSize != -1) && (archiveSize != expectedSize)) {
-           throwAtExit = true;
-           ctx.setLastSize(archiveSize);
-       }
-       byte[] expectedHash = ctx.getLastHash();
-       if(expectedHash != null) {
-           byte[] realHash;
-           try {
-               realHash = BucketTools.hash(data);
-           } catch (IOException e) {
-               throw new ArchiveFailureException("Error reading archive data: "+e, e);
-           }
-           if(!Arrays.equals(realHash, expectedHash))
-               throwAtExit = true;
-           ctx.setLastHash(realHash);
-       }
        if(archiveSize > archiveContext.maxArchiveSize)
            throw new ArchiveFailureException("Archive too big ("+archiveSize+" > "+archiveContext.maxArchiveSize+")!");
@@ -320,7 +294,7 @@ else if(logMINOR)
                public void run() {
                    InputStream is = null;
                    try {
-                       Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(is = data.getInputStream(), os, data.size(), expectedSize);
+                       Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(is = data.getInputStream(), os, data.size(), -1);
                    } catch (CompressionOutputSizeException e) {
                        Logger.error(this, "Failed to decompress archive: "+e, e);
                        wrapper.set(e);
@@ -348,10 +322,10 @@ public void run() {
        }

        if(ARCHIVE_TYPE.ZIP == archiveType) {
-           handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
+           handleZIPArchive(ctx, key, is, element, callback, gotElement, context);
        } else if(ARCHIVE_TYPE.TAR == archiveType) {
            // COMPRESS-449 workaround, see https://freenet.mantishub.io/view.php?id=6921
-           handleTARArchive(ctx, key, new SkipShieldingInputStream(is), element, callback, gotElement, throwAtExit, context);
+           handleTARArchive(ctx, key, new SkipShieldingInputStream(is), element, callback, gotElement, context);
        } else {
            throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
        }
@@ -366,7 +340,7 @@ public void run() {
        }
    }

-   private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
+   private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException {
        if(logMINOR) Logger.minor(this, "Handling a TAR Archive");
        TarArchiveInputStream tarIS = null;
        try {
@@ -439,7 +413,6 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre
            generateMetadata(ctx, key, names, gotElement, element, callback, context);
            trimStoredData();
        }
-       if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

        if((!gotElement.value) && element != null)
            callback.notInArchive(context);
@@ -451,7 +424,7 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre
        }
    }

-   private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
+   private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException {
        if(logMINOR) Logger.minor(this, "Handling a ZIP Archive");
        ZipInputStream zis = null;
        try {
@@ -519,7 +492,6 @@ private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre
            generateMetadata(ctx, key, names, gotElement, element, callback, context);
            trimStoredData();
        }
-       if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

        if((!gotElement.value) && element != null)
            callback.notInArchive(context);
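
Note on PATCH 1/6: the removed restart tracking was dead by construction. A minimal sketch (hypothetical local names, mirroring the removed lines, not part of the patch) shows why the restart flag could never be set:

    // Sketch only; mirrors the code removed above.
    long lastSize = -1;           // starts unset and is never written anywhere else
    boolean throwAtExit = false;
    if (lastSize != -1 && archiveSize != lastSize) { // always false: lastSize stays -1
        throwAtExit = true;
        lastSize = archiveSize;   // the only write sits inside the unreachable branch
    }
    // lastHash follows the same shape: it is only updated when it is already
    // non-null, so ArchiveRestartException("Archive changed on re-fetch") is
    // never thrown.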
From c91748f02d4db3b3c5b55ccb4fa16ee81e4651ea Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:42 +0100
Subject: [PATCH 2/6] Move archive entry caching out of ArchiveManager

Remove the double bookkeeping of archive entries in ArchiveManager and
ArchiveStoreContext and replace it with a central cache implementation.
This simplifies the locking (for which there were instructions in the
comments, but these were ignored in the implementation) and reduces the
number of arguments being passed around.

This also removes the "remove all cached items for an archive" logic
that would likely lead to memory leaks as not all references were
consistently released, and fixes some potential race conditions in
claiming the buckets.
---
 src/freenet/client/ArchiveBucketCache.java    | 113 ++++++++
 src/freenet/client/ArchiveHandlerImpl.java    |   3 +-
 src/freenet/client/ArchiveKey.java            |  36 ---
 src/freenet/client/ArchiveManager.java        | 242 +++----------------
 src/freenet/client/ArchiveStoreContext.java   | 117 ---------
 src/freenet/client/ArchiveStoreItem.java      |  55 ----
 src/freenet/client/ErrorArchiveStoreItem.java |  53 ----
 src/freenet/client/RealArchiveStoreItem.java  |  91 -------
 .../client/ArchiveBucketCacheTest.java        | 137 ++++++++++
 9 files changed, 289 insertions(+), 558 deletions(-)
 create mode 100644 src/freenet/client/ArchiveBucketCache.java
 delete mode 100644 src/freenet/client/ArchiveKey.java
 delete mode 100644 src/freenet/client/ArchiveStoreContext.java
 delete mode 100644 src/freenet/client/ArchiveStoreItem.java
 delete mode 100644 src/freenet/client/ErrorArchiveStoreItem.java
 delete mode 100644 src/freenet/client/RealArchiveStoreItem.java
 create mode 100644 test/freenet/client/ArchiveBucketCacheTest.java

diff --git a/src/freenet/client/ArchiveBucketCache.java b/src/freenet/client/ArchiveBucketCache.java
new file mode 100644
index 00000000000..c589b99c8e9
--- /dev/null
+++ b/src/freenet/client/ArchiveBucketCache.java
@@ -0,0 +1,113 @@
+package freenet.client;
+
+import freenet.keys.FreenetURI;
+import freenet.support.LRUMap;
+import freenet.support.api.Bucket;
+import freenet.support.io.MultiReaderBucket;
+
+class ArchiveBucketCache {
+   /**
+    * Maximum number of cached buckets.
+    */
+   private final int maxBuckets;
+
+   /**
+    * Maximum cached data in bytes.
+    */
+   private final long maxBytes;
+
+   /**
+    * Underlying cached items by cache key (as [uri]:[filename]).
+    */
+   private final LRUMap<String, CachedBucket> cache = new LRUMap<>();
+
+   /**
+    * Currently cached data in bytes.
+    */
+   private long currentBytes;
+
+   ArchiveBucketCache(int maxBuckets, long maxBytes) {
+       this.maxBuckets = maxBuckets;
+       this.maxBytes = maxBytes;
+   }
+
+   /**
+    * Look up an item in the cache and return a claimed bucket for it.
+    * The returned bucket (if not null) must be freed by the caller.
+    *
+    * @return a read-only bucket referencing the cached data, or null if the entry was not found
+    */
+   synchronized Bucket acquire(FreenetURI uri, String filename) {
+       String key = createCacheKey(uri, filename);
+       CachedBucket item = cache.get(key);
+       if (item != null) {
+           // Promote the item to the top of the LRU.
+           cache.push(key, item);
+           // Acquire the bucket while holding the lock to ensure the item is not yet released.
+           return acquire(item);
+       }
+       return null;
+   }
+
+   /**
+    * Add an item to the cache and return an acquired bucket for the same item.
+    * The cache assumes responsibility for freeing the provided bucket; the caller should not free it.
+    * The returned bucket must eventually be freed by the caller.
+    *
+    * @return a read-only bucket referencing the same data as the provided bucket
+    */
+   synchronized Bucket addAndAcquire(FreenetURI uri, String filename, Bucket bucket) {
+       // Store the item in the cache; it will be released when it gets evicted from the cache.
+       String key = createCacheKey(uri, filename);
+       CachedBucket item = new CachedBucket(bucket);
+       CachedBucket oldItem = cache.push(key, item);
+       onAdded(item);
+
+       // Acquire the bucket now to keep the item alive, even if the item is evicted immediately after we return.
+       Bucket acquired = acquire(item);
+
+       // Clean up the evicted item (if any) and evict the least recently used items to stay within size limits.
+       onEvicted(oldItem);
+       evictLeastRecentlyUsedItems();
+
+       return acquired;
+   }
+
+   private void evictLeastRecentlyUsedItems() {
+       while (!cache.isEmpty() && (currentBytes > maxBytes || cache.size() > maxBuckets)) {
+           CachedBucket oldItem = cache.popValue();
+           onEvicted(oldItem);
+       }
+   }
+
+   private void onAdded(CachedBucket item) {
+       currentBytes += item.size;
+   }
+
+   private void onEvicted(CachedBucket item) {
+       if (item != null) {
+           currentBytes -= item.size;
+           item.keepAliveReference.free();
+       }
+   }
+
+   private static Bucket acquire(CachedBucket item) {
+       return item.multiReaderBucket.getReaderBucket();
+   }
+
+   private static String createCacheKey(FreenetURI uri, String filename) {
+       return uri.toASCIIString() + ":" + filename;
+   }
+
+   private static class CachedBucket {
+       private final MultiReaderBucket multiReaderBucket;
+       private final Bucket keepAliveReference;
+       private final long size;
+
+       private CachedBucket(Bucket bucket) {
+           this.multiReaderBucket = new MultiReaderBucket(bucket);
+           this.keepAliveReference = multiReaderBucket.getReaderBucket();
+           this.size = bucket.size();
+       }
+   }
+}
diff --git a/src/freenet/client/ArchiveHandlerImpl.java b/src/freenet/client/ArchiveHandlerImpl.java
index 13c5955db2c..8535f145334 100644
--- a/src/freenet/client/ArchiveHandlerImpl.java
+++ b/src/freenet/client/ArchiveHandlerImpl.java
@@ -63,8 +63,7 @@ public void extractToCache(Bucket bucket, ArchiveContext actx,
            ArchiveManager manager, ClientContext context)
            throws ArchiveFailureException, ArchiveRestartException {
        forceRefetchArchive = false; // now we don't need to force refetch any more
-       ArchiveStoreContext ctx = manager.makeContext(key, archiveType, compressorType, false);
-       manager.extractToCache(key, archiveType, compressorType, bucket, actx, ctx, element, callback, context);
+       manager.extractToCache(key, archiveType, compressorType, bucket, actx, element, callback, context);
    }

    @Override
diff --git a/src/freenet/client/ArchiveKey.java b/src/freenet/client/ArchiveKey.java
deleted file mode 100644
index 91748c87305..00000000000
--- a/src/freenet/client/ArchiveKey.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-
-public class ArchiveKey {
-
-   final FreenetURI key;
-   final String filename;
-
-   public ArchiveKey(FreenetURI key2, String filename2) {
-       key = key2;
-       filename = filename2;
-   }
-
-   @Override
-   public boolean equals(Object o) {
-       if((o == null) || !(o instanceof ArchiveKey)) return false;
-       if(this == o) return true;
-
-       ArchiveKey cmp = ((ArchiveKey)o);
-       return (cmp.key.equals(key) && cmp.filename.equals(filename));
-   }
-
-   @Override
-   public int hashCode() {
-       return key.hashCode() ^ filename.hashCode();
-   }
-
-   @Override
-   public String toString() {
-       return key+":"+filename;
-   }
-}
\ No newline at end of file
diff --git a/src/freenet/client/ArchiveManager.java b/src/freenet/client/ArchiveManager.java
index aab4dd97f2f..7588a8abd49 100644
--- a/src/freenet/client/ArchiveManager.java
+++ b/src/freenet/client/ArchiveManager.java
@@ -16,14 +16,9 @@
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;

-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-
 import freenet.client.async.ClientContext;
 import freenet.keys.FreenetURI;
 import freenet.support.ExceptionWrapper;
-import freenet.support.LRUMap;
 import freenet.support.Logger;
 import freenet.support.Logger.LogLevel;
 import freenet.support.MutableBoolean;
@@ -35,6 +30,9 @@
 import freenet.support.io.Closer;
 import freenet.support.io.SkipShieldingInputStream;
 import net.contrapunctus.lzma.LzmaInputStream;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;

 /**
  * Cache of recently decoded archives:
@@ -42,8 +40,6 @@
  * files open due to the limitations of the java.util.zip API)
  * - Keep up to Y bytes (after padding and overheads) of decoded data on disk
  * (the OS is quite capable of determining what to keep in actual RAM)
- *
- * Always take the lock on ArchiveStoreContext before the lock on ArchiveManager, NOT the other way around.
  */
 public class ArchiveManager {
@@ -109,26 +105,13 @@ public static ARCHIVE_TYPE getDefault() {

    final long maxArchivedFileSize;

-   // ArchiveHandler's
-   final int maxArchiveHandlers;
-   private final LRUMap<FreenetURI, ArchiveStoreContext> archiveHandlers;
-
-   // Data cache
-   /** Maximum number of cached ArchiveStoreItems */
-   final int maxCachedElements;
-   /** Maximum cached data in bytes */
-   final long maxCachedData;
-   /** Currently cached data in bytes */
-   private long cachedData;
-   /** Map from ArchiveKey to ArchiveStoreElement */
-   private final LRUMap<ArchiveKey, ArchiveStoreItem> storedData;
+   /** Archive bucket cache */
+   private final ArchiveBucketCache cache;
    /** Bucket Factory */
    private final BucketFactory tempBucketFactory;

    /**
     * Create an ArchiveManager.
-    * @param maxHandlers The maximum number of cached ArchiveHandler's i.e. the
-    * maximum number of containers to track.
     * @param maxCachedData The maximum size of the cache directory, in bytes.
     * @param maxArchiveSize The maximum size of an archive.
     * @param maxArchivedFileSize The maximum extracted size of a single file in any
     * file.
     * @param tempBucketFactory
     */
-   public ArchiveManager(int maxHandlers, long maxCachedData, long maxArchivedFileSize, int maxCachedElements, BucketFactory tempBucketFactory) {
-       maxArchiveHandlers = maxHandlers;
-       // FIXME PERFORMANCE I'm assuming there isn't much locality here, so it's faster to use the FAST_COMPARATOR.
-       // This may not be true if there are a lot of sites with many containers all inserted as individual SSKs?
-       archiveHandlers = LRUMap.createSafeMap(FreenetURI.FAST_COMPARATOR);
-       this.maxCachedElements = maxCachedElements;
-       this.maxCachedData = maxCachedData;
-       storedData = new LRUMap<ArchiveKey, ArchiveStoreItem>();
+   public ArchiveManager(int unused, long maxCachedData, long maxArchivedFileSize, int maxCachedElements, BucketFactory tempBucketFactory) {
+       this.cache = new ArchiveBucketCache(maxCachedElements, maxCachedData);
        this.maxArchivedFileSize = maxArchivedFileSize;
        this.tempBucketFactory = tempBucketFactory;
        logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
    }

-   /** Add an ArchiveHandler by key */
-   private synchronized void putCached(FreenetURI key, ArchiveStoreContext zip) {
-       if(logMINOR) Logger.minor(this, "Put cached AH for "+key+" : "+zip);
-       archiveHandlers.push(key, zip);
-       while(archiveHandlers.size() > maxArchiveHandlers)
-           archiveHandlers.popKey(); // dump it
-   }
-
-   /** Get an ArchiveHandler by key */
-   ArchiveStoreContext getCached(FreenetURI key) {
-       if(logMINOR) Logger.minor(this, "Get cached AH for "+key);
-       ArchiveStoreContext handler = archiveHandlers.get(key);
-       if(handler == null) return null;
-       archiveHandlers.push(key, handler);
-       return handler;
-   }
-
-   /**
-    * Create an archive handler. This does not need to know how to
-    * fetch the key, because the methods called later will ask.
-    * It will try to serve from cache, but if that fails, will
-    * re-fetch.
-    * @param key The key of the archive that we are extracting data from.
-    * @param archiveType The archive type, defined in Metadata.
-    * @return An archive handler.
-    */
-   synchronized ArchiveStoreContext makeContext(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean returnNullIfNotFound) {
-       ArchiveStoreContext handler = null;
-       handler = getCached(key);
-       if(handler != null) return handler;
-       if(returnNullIfNotFound) return null;
-       handler = new ArchiveStoreContext(key, archiveType);
-       putCached(key, handler);
-       return handler;
-   }
-
    /**
     * Create an archive handler. This does not need to know how to
     * fetch the key, because the methods called later will ask.
@@ -205,57 +146,30 @@ public ArchiveHandler makeHandler(FreenetURI key, ARCHIVE_TYPE archiveType, COMP
     * @param key The key used to fetch the archive.
     * @param filename The name of the file within the archive.
     * @return A Bucket containing the data requested, or null.
-    * @throws ArchiveFailureException
     */
-   public Bucket getCached(FreenetURI key, String filename) throws ArchiveFailureException {
-       if(logMINOR) Logger.minor(this, "Fetch cached: "+key+ ' ' +filename);
-       ArchiveKey k = new ArchiveKey(key, filename);
-       ArchiveStoreItem asi = null;
-       synchronized (this) {
-           asi = storedData.get(k);
-           if(asi == null) return null;
-           // Promote to top of LRU
-           storedData.push(k, asi);
+   public Bucket getCached(FreenetURI key, String filename) {
+       if (logMINOR) {
+           Logger.minor(this, "Fetch cached: " + key + ' ' + filename);
        }
-       if(logMINOR) Logger.minor(this, "Found data");
-       return asi.getReaderBucket();
+       return cache.acquire(key, filename);
    }

    /**
-    * Remove a file from the cache. Called after it has been removed from its
-    * ArchiveHandler.
-    * @param item The ArchiveStoreItem to remove.
-    */
-   synchronized void removeCachedItem(ArchiveStoreItem item) {
-       long size = item.spaceUsed();
-       storedData.removeKey(item.key);
-       // Hard disk space limit = remove it here.
-       // Soft disk space limit would be to remove it outside the lock.
-       // Soft disk space limit = we go over the limit significantly when we
-       // are overloaded.
-       cachedData -= size;
-       if(logMINOR) Logger.minor(this, "removeCachedItem: "+item);
-       item.close();
-   }
-
-   /**
-    * Extract data to cache. Call synchronized on ctx.
+   /**
+    * Extract data to cache.
     * @param key The key the data was fetched from.
     * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
     * @param data The actual data fetched.
     * @param archiveContext The context for the whole fetch process.
-    * @param ctx The ArchiveStoreContext for this key.
     * @param element A particular element that the caller is especially interested in, or null.
     * @param callback A callback to be called if we find that element, or if we don't.
     * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
     */
-   public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
+   void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, String element, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
        logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

        MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

        if(logMINOR) Logger.minor(this, "Extracting "+key);
-       ctx.removeAllCachedItems(this); // flush cache anyway
        final long archiveSize = data.size();

        if(archiveSize > archiveContext.maxArchiveSize)
@@ -322,10 +236,10 @@ public void run() {
        }

        if(ARCHIVE_TYPE.ZIP == archiveType) {
-           handleZIPArchive(ctx, key, is, element, callback, gotElement, context);
+           handleZIPArchive(key, is, element, callback, gotElement, context);
        } else if(ARCHIVE_TYPE.TAR == archiveType) {
            // COMPRESS-449 workaround, see https://freenet.mantishub.io/view.php?id=6921
-           handleTARArchive(ctx, key, new SkipShieldingInputStream(is), element, callback, gotElement, context);
+           handleTARArchive(key, new SkipShieldingInputStream(is), element, callback, gotElement, context);
        } else {
            throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
        }
@@ -340,7 +254,7 @@ public void run() {
        }
    }

-   private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException {
+   private void handleTARArchive(FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException {
        if(logMINOR) Logger.minor(this, "Handling a TAR Archive");
        TarArchiveInputStream tarIS = null;
        try {
@@ -370,9 +284,7 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre
            long size = entry.getSize();
            if(name.equals(".metadata"))
                gotMetadata = true;
-           if(size > maxArchivedFileSize && !name.equals(element)) {
-               addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
"+maxArchivedFileSize, true); - } else { + if (size <= maxArchivedFileSize || name.equals(element)) { // Read the element long realLen = 0; Bucket output = tempBucketFactory.makeBucket(size); @@ -384,7 +296,6 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre out.write(buf, 0, readBytes); readBytes += realLen; if(readBytes > maxArchivedFileSize) { - addErrorElement(ctx, key, name, "File too big: "+maxArchivedFileSize+" greater than current archived file size limit "+maxArchivedFileSize, true); out.close(); out = null; output.free(); @@ -396,22 +307,19 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre if(out != null) out.close(); } if(size <= maxArchivedFileSize) { - addStoreElement(ctx, key, name, output, gotElement, element, callback, context); + addStoreElement(key, name, output, gotElement, element, callback, context); names.add(name); - trimStoredData(); } else { // We are here because they asked for this file. callback.gotBucket(output, context); gotElement.value = true; - addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true); } } } // If no metadata, generate some if(!gotMetadata) { - generateMetadata(ctx, key, names, gotElement, element, callback, context); - trimStoredData(); + generateMetadata(key, names, gotElement, element, callback, context); } if((!gotElement.value) && element != null) @@ -424,7 +332,7 @@ private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre } } - private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException { + private void handleZIPArchive(FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, ClientContext context) throws ArchiveFailureException { if(logMINOR) Logger.minor(this, "Handling a ZIP Archive"); ZipInputStream zis = null; try { @@ -449,9 +357,7 @@ private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre long size = entry.getSize(); if(name.equals(".metadata")) gotMetadata = true; - if(size > maxArchivedFileSize && !name.equals(element)) { - addErrorElement(ctx, key, name, "File too big: "+maxArchivedFileSize+" greater than current archived file size limit "+maxArchivedFileSize, true); - } else { + if (size <= maxArchivedFileSize || name.equals(element)) { // Read the element long realLen = 0; Bucket output = tempBucketFactory.makeBucket(size); @@ -463,34 +369,30 @@ private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStre out.write(buf, 0, readBytes); readBytes += realLen; if(readBytes > maxArchivedFileSize) { - addErrorElement(ctx, key, name, "File too big: "+maxArchivedFileSize+" greater than current archived file size limit "+maxArchivedFileSize, true); out.close(); out = null; output.free(); continue outerZIP; } } - + } finally { if(out != null) out.close(); } if(size <= maxArchivedFileSize) { - addStoreElement(ctx, key, name, output, gotElement, element, callback, context); + addStoreElement(key, name, output, gotElement, element, callback, context); names.add(name); - trimStoredData(); } else { // We are here because they asked for this file. 
                callback.gotBucket(output, context);
                gotElement.value = true;
-               addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
            }
        }

        // If no metadata, generate some
        if(!gotMetadata) {
-           generateMetadata(ctx, key, names, gotElement, element, callback, context);
-           trimStoredData();
+           generateMetadata(key, names, gotElement, element, callback, context);
        }

        if((!gotElement.value) && element != null)
@@ -517,7 +419,6 @@ private String stripLeadingSlashes(String name) {

    /**
     * Generate fake metadata for an archive which doesn't have any.
-    * @param ctx The context object.
     * @param key The key from which the archive we are unpacking was fetched.
     * @param names Set of names in the archive.
     * @param element2
@@ -525,7 +426,7 @@
     * @param callbackName If we generate a
     * @throws ArchiveFailureException
     */
-   private ArchiveStoreItem generateMetadata(ArchiveStoreContext ctx, FreenetURI key, Set names, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
+   private void generateMetadata(FreenetURI key, Set names, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
        /* What we have to do is to:
         * - Construct a filesystem tree of the names.
         * - Turn each level of the tree into a Metadata object, including those below it, with
@@ -545,10 +446,10 @@
        while(true) {
            try {
                bucket = metadata.toBucket(tempBucketFactory);
-               return addStoreElement(ctx, key, ".metadata", bucket, gotElement, element2, callback, context);
+               addStoreElement(key, ".metadata", bucket, gotElement, element2, callback, context);
            } catch (MetadataUnresolvedException e) {
                try {
-                   x = resolve(e, x, tempBucketFactory, ctx, key, gotElement, element2, callback, context);
+                   x = resolve(e, x, tempBucketFactory, key, gotElement, element2, callback, context);
                } catch (IOException e1) {
                    throw new ArchiveFailureException("Failed to create metadata: "+e1, e1);
                }
@@ -559,12 +460,12 @@
        }
    }

-   private int resolve(MetadataUnresolvedException e, int x, BucketFactory bf, ArchiveStoreContext ctx, FreenetURI key, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ClientContext context) throws IOException, ArchiveFailureException {
+   private int resolve(MetadataUnresolvedException e, int x, BucketFactory bf, FreenetURI key, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ClientContext context) throws IOException {
        for(Metadata m: e.mustResolve) {
            try {
-               addStoreElement(ctx, key, ".metadata-"+(x++), m.toBucket(bf), gotElement, element2, callback, context);
+               addStoreElement(key, ".metadata-"+(x++), m.toBucket(bf), gotElement, element2, callback, context);
            } catch (MetadataUnresolvedException e1) {
-               x = resolve(e, x, bf, ctx, key, gotElement, element2, callback, context);
+               x = resolve(e, x, bf, key, gotElement, element2, callback, context);
                continue;
            }
        }
@@ -596,31 +497,6 @@ private void addToDirectory(HashMap dir, String name, String pre
        }
    }

-   /**
-    * Add an error element to the cache. This happens when a single file in the archive
-    * is invalid (usually because it is too large).
-    * @param ctx The ArchiveStoreContext which must be notified about this element's creation.
-    * @param key The key from which the archive was fetched.
-    * @param name The name of the file within the archive.
-    * @param error The error message to be included on the eventual exception thrown,
-    * if anyone tries to extract the data for this element.
-    */
-   private void addErrorElement(ArchiveStoreContext ctx, FreenetURI key, String name, String error, boolean tooBig) {
-       ErrorArchiveStoreItem element = new ErrorArchiveStoreItem(ctx, key, name, error, tooBig);
-       element.addToContext();
-       if(logMINOR) Logger.minor(this, "Adding error element: "+element+" for "+key+ ' ' +name);
-       ArchiveStoreItem oldItem;
-       synchronized (this) {
-           oldItem = storedData.get(element.key);
-           storedData.push(element.key, element);
-           if(oldItem != null) {
-               oldItem.close();
-               cachedData -= oldItem.spaceUsed();
-               if(logMINOR) Logger.minor(this, "Dropping old store element from archive cache: "+oldItem);
-           }
-       }
-   }
-
    /**
     * Add a store element.
     * @param callbackName If set, the name of the file for which we must call the callback if this file happens to
     * it again.
     * @param callback Callback to be called if we do find it. We must getReaderBucket() before adding the data to the
     * LRU, otherwise it may be deleted before it reaches the client.
-    * @throws ArchiveFailureException If a failure occurred resulting in the data not being readable. Only happens if
-    * callback != null.
     */
-   private ArchiveStoreItem addStoreElement(ArchiveStoreContext ctx, FreenetURI key, String name, Bucket temp, MutableBoolean gotElement, String callbackName, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
-       RealArchiveStoreItem element = new RealArchiveStoreItem(ctx, key, name, temp);
-       element.addToContext();
-       if(logMINOR) Logger.minor(this, "Adding store element: "+element+" ( "+key+ ' ' +name+" size "+element.spaceUsed()+" )");
-       ArchiveStoreItem oldItem;
-       // Let it throw, if it does something is drastically wrong
-       Bucket matchBucket = null;
-       if((!gotElement.value) && name.equals(callbackName)) {
-           matchBucket = element.getReaderBucket();
+   private void addStoreElement(FreenetURI key, String name, Bucket temp, MutableBoolean gotElement, String callbackName, ArchiveExtractCallback callback, ClientContext context) {
+       if (logMINOR) {
+           Logger.minor(this, "Adding store element ( " + key + ' ' + name + " size " + temp.size() + " )");
        }
-       synchronized (this) {
-           oldItem = storedData.get(element.key);
-           storedData.push(element.key, element);
-           cachedData += element.spaceUsed();
-           if(oldItem != null) {
-               cachedData -= oldItem.spaceUsed();
-               if(logMINOR) Logger.minor(this, "Dropping old store element from archive cache: "+oldItem);
-               oldItem.close();
-           }
-       }
-       if(matchBucket != null) {
-           callback.gotBucket(matchBucket, context);
+       Bucket acquiredBucket = cache.addAndAcquire(key, name, temp);
+       if (!gotElement.value && name.equals(callbackName)) {
+           callback.gotBucket(acquiredBucket, context);
            gotElement.value = true;
-       }
-       return element;
-   }
-
-   /**
-    * Drop any stored data beyond the limit.
-    * Call synchronized on storedData.
-    */
-   private void trimStoredData() {
-       synchronized(this) {
-           while(true) {
-               ArchiveStoreItem item;
-               if(cachedData <= maxCachedData && storedData.size() <= maxCachedElements) return;
-               if(storedData.isEmpty()) {
-                   // Race condition? cachedData out of sync?
- Logger.error(this, "storedData is empty but still over limit: cachedData="+cachedData+" / "+maxCachedData); - return; - } - item = storedData.popValue(); - long space = item.spaceUsed(); - cachedData -= space; - // Hard limits = delete file within lock, soft limits = delete outside of lock - // Here we use a hard limit - if(logMINOR) - Logger.minor(this, "Dropping "+item+" : cachedData="+cachedData+" of "+maxCachedData+" stored items : "+storedData.size()+" of "+maxCachedElements); - item.close(); - } + } else { + acquiredBucket.free(); } } diff --git a/src/freenet/client/ArchiveStoreContext.java b/src/freenet/client/ArchiveStoreContext.java deleted file mode 100644 index 2a9ee198762..00000000000 --- a/src/freenet/client/ArchiveStoreContext.java +++ /dev/null @@ -1,117 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package freenet.client; - -import java.util.LinkedList; - -import freenet.keys.FreenetURI; -import freenet.support.LogThresholdCallback; -import freenet.support.Logger; -import freenet.support.Logger.LogLevel; - -/** - * Tracks all files currently in the cache from a given key. - * Keeps the last known hash of the key (if this changes in a fetch, we flush the cache, unpack, - * then throw an ArchiveRestartedException). - * Provides fetch methods for Fetcher, which try the cache and then fetch if necessary, - * subject to the above. - * - * Always take the lock on ArchiveStoreContext before the lock on ArchiveManager, NOT the other way around. - */ -class ArchiveStoreContext { - - private FreenetURI key; - private final ArchiveManager.ARCHIVE_TYPE archiveType; - /** Archive size */ - private long lastSize = -1; - /** Archive hash */ - private byte[] lastHash; - /** Index of still-cached ArchiveStoreItems with this key. - * Note that we never ever hold this and then take another lock! In particular - * we must not take the ArchiveManager lock while holding this lock. It must be - * the inner lock to avoid deadlocks. */ - private final LinkedList myItems; - - private static volatile boolean logMINOR; - static { - Logger.registerLogThresholdCallback(new LogThresholdCallback(){ - @Override - public void shouldUpdate(){ - logMINOR = Logger.shouldLog(LogLevel.MINOR, this); - } - }); - } - - ArchiveStoreContext(FreenetURI key, ArchiveManager.ARCHIVE_TYPE archiveType) { - this.key = key; - this.archiveType = archiveType; - myItems = new LinkedList(); - } - - /** Returns the size of the archive last time we fetched it, or -1 */ - long getLastSize() { - return lastSize; - } - - /** Sets the size of the archive - @see getLastSize() */ - void setLastSize(long size) { - lastSize = size; - } - - - /** Returns the hash of the archive last time we fetched it, or null */ - byte[] getLastHash() { - return lastHash; - } - - /** Sets the hash of the archive - @see getLastHash() */ - void setLastHash(byte[] realHash) { - lastHash = realHash; - } - - /** - * Remove all ArchiveStoreItems with this key from the cache. - */ - void removeAllCachedItems(ArchiveManager manager) { - ArchiveStoreItem item = null; - while(true) { - synchronized (myItems) { - // removeCachedItem() will call removeItem(), so don't remove it here. - item = myItems.peek(); - } - if(item == null) break; - manager.removeCachedItem(item); - } - } - - /** Notify that a new archive store item with this key has been added to the cache. 
-   void addItem(ArchiveStoreItem item) {
-       synchronized(myItems) {
-           myItems.push(item);
-       }
-   }
-
-   /** Notify that an archive store item with this key has been expelled from the
-    * cache. Remove it from our local cache and ask it to free the bucket if
-    * necessary. */
-   void removeItem(ArchiveStoreItem item) {
-       synchronized(myItems) {
-           if (!myItems.remove(item)) {
-               if(logMINOR)
-                   Logger.minor(this, "Not removing: "+item+" for "+this+" - already removed");
-               return; // only removed once
-           }
-       }
-       item.innerClose();
-   }
-
-   public short getArchiveType() {
-       return archiveType.metadataID;
-   }
-
-   public FreenetURI getKey() {
-       return key;
-   }
-
-}
diff --git a/src/freenet/client/ArchiveStoreItem.java b/src/freenet/client/ArchiveStoreItem.java
deleted file mode 100644
index 362208aeb70..00000000000
--- a/src/freenet/client/ArchiveStoreItem.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.client;
-
-import freenet.support.api.Bucket;
-
-/**
- * Base class for items stored in the archive cache.
- */
-abstract class ArchiveStoreItem {
-   final ArchiveKey key;
-   final ArchiveStoreContext context;
-
-   /** Basic constructor. */
-   ArchiveStoreItem(ArchiveKey key, ArchiveStoreContext context) {
-       this.key = key;
-       this.context = context;
-   }
-
-   protected void addToContext() {
-       context.addItem(this);
-   }
-
-   /** Delete any stored data on disk etc.
-    * Override in subtypes for specific cleanup.
-    * Will be called with locks held, so should only do low level operations
-    * such as deletes.. */
-   void innerClose() { } // override in subtypes for cleanup
-
-   /**
-    * Shortcut to start the removal/cleanup process.
-    */
-   final void close() {
-       context.removeItem(this);
-   }
-
-   /**
-    * Return cached data as a Bucket, or throw an ArchiveFailureException.
-    */
-   abstract Bucket getDataOrThrow() throws ArchiveFailureException;
-
-   /**
-    * Return the amount of cache space used by the item. May be called inside
-    * locks so should not take any nontrivial locks or take long.
-    */
-   abstract long spaceUsed();
-
-   /**
-    * Get the data as a Bucket, and guarantee that it won't be freed until the
-    * returned object is either finalized or freed.
-    */
-   abstract Bucket getReaderBucket() throws ArchiveFailureException;
-
-}
diff --git a/src/freenet/client/ErrorArchiveStoreItem.java b/src/freenet/client/ErrorArchiveStoreItem.java
deleted file mode 100644
index 3c04d6b56a3..00000000000
--- a/src/freenet/client/ErrorArchiveStoreItem.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-import freenet.support.api.Bucket;
-
-class ErrorArchiveStoreItem extends ArchiveStoreItem {
-
-   /** Error message. Usually something about the file being too big. */
-   String error;
-   boolean tooBig;
-
-   /**
-    * Create a placeholder item for a file which could not be extracted from the archive.
-    * @param ctx The context object which tracks all the items with this key.
-    * @param key2 The key from which the archive was fetched.
-    * @param name The name of the file which failed to extract.
-    * @param error The error message to be included in the thrown exception when
-    * somebody tries to get the data.
-    */
-   public ErrorArchiveStoreItem(ArchiveStoreContext ctx, FreenetURI key2, String name, String error, boolean tooBig) {
-       super(new ArchiveKey(key2, name), ctx);
-       this.error = error;
-       this.tooBig = tooBig;
-   }
-
-   /**
-    * Throws an exception with the given error message, because this file could not be
-    * extracted from the archive.
-    */
-   @Override
-   Bucket getDataOrThrow() throws ArchiveFailureException {
-       throw new ArchiveFailureException(error);
-   }
-
-   @Override
-   public long spaceUsed() {
-       return 0;
-   }
-
-   @Override
-   Bucket getReaderBucket() throws ArchiveFailureException {
-       if(tooBig) return null;
-       throw new ArchiveFailureException(error);
-   }
-
-   public boolean tooBig() {
-       return tooBig;
-   }
-
-}
diff --git a/src/freenet/client/RealArchiveStoreItem.java b/src/freenet/client/RealArchiveStoreItem.java
deleted file mode 100644
index 650ecfc9ee7..00000000000
--- a/src/freenet/client/RealArchiveStoreItem.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package freenet.client;
-
-import freenet.keys.FreenetURI;
-import freenet.support.LogThresholdCallback;
-import freenet.support.Logger;
-import freenet.support.Logger.LogLevel;
-import freenet.support.api.Bucket;
-import freenet.support.io.MultiReaderBucket;
-
-class RealArchiveStoreItem extends ArchiveStoreItem {
-
-   private final MultiReaderBucket mb;
-   private final Bucket bucket;
-   private final long spaceUsed;
-
-   private static volatile boolean logMINOR;
-   static {
-       Logger.registerLogThresholdCallback(new LogThresholdCallback(){
-           @Override
-           public void shouldUpdate(){
-               logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
-           }
-       });
-   }
-
-   /**
-    * Create an ArchiveStoreElement from a TempStoreElement.
-    * @param key2 The key of the archive the file came from.
-    * @param realName The name of the file in that archive.
-    * @param temp The TempStoreElement currently storing the data.
-    * @param manager The parent ArchiveManager within which this item is stored.
-    */
-   RealArchiveStoreItem(ArchiveStoreContext ctx, FreenetURI key2, String realName, Bucket bucket) {
-       super(new ArchiveKey(key2, realName), ctx);
-       if(bucket == null) throw new NullPointerException();
-       mb = new MultiReaderBucket(bucket);
-       this.bucket = mb.getReaderBucket();
-       if(this.bucket == null) throw new NullPointerException();
-       this.bucket.setReadOnly();
-       spaceUsed = this.bucket.size();
-   }
-
-   /**
-    * Return the data, as a Bucket, in plaintext.
-    */
-   Bucket dataAsBucket() {
-       return bucket;
-   }
-
-   /**
-    * Return the length of the data.
-    */
-   long dataSize() {
-       return bucket.size();
-   }
-
-   /**
-    * Return the estimated space used by the data.
-    */
-   @Override
-   long spaceUsed() {
-       return spaceUsed;
-   }
-
-   @Override
-   void innerClose() {
-       if(logMINOR)
-           Logger.minor(this, "innerClose(): "+this+" : "+bucket);
-       if(bucket == null) {
-           // This still happens. It is clearly impossible as we check in the constructor and throw if it is null.
-           // Nonetheless there is little we can do here ...
- Logger.error(this, "IMPOSSIBLE: BUCKET IS NULL!", new Exception("error")); - return; - } - bucket.free(); - } - - @Override - Bucket getDataOrThrow() throws ArchiveFailureException { - return dataAsBucket(); - } - - @Override - Bucket getReaderBucket() throws ArchiveFailureException { - return mb.getReaderBucket(); - } - -} diff --git a/test/freenet/client/ArchiveBucketCacheTest.java b/test/freenet/client/ArchiveBucketCacheTest.java new file mode 100644 index 00000000000..5a8a47b16e6 --- /dev/null +++ b/test/freenet/client/ArchiveBucketCacheTest.java @@ -0,0 +1,137 @@ +package freenet.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; + +import freenet.keys.FreenetURI; +import freenet.support.api.Bucket; +import freenet.support.io.ArrayBucket; +import freenet.support.io.FileUtil; +import org.junit.Test; + +public class ArchiveBucketCacheTest { + + private final FreenetURI archiveUri = new FreenetURI("KSK", "archive"); + private final String originalContent = "archive file contents"; + private final Bucket originalBucket = new ArrayBucket(originalContent.getBytes(StandardCharsets.UTF_8)); + + private final ArchiveBucketCache singleBucketCache = new ArchiveBucketCache(1, Integer.MAX_VALUE); + + @Test + public void acquireReturnsNullWhenNotFound() { + Bucket acquired = singleBucketCache.acquire(archiveUri, "file"); + assertNull("Acquired bucket should be null", acquired); + } + + @Test + public void addAndAcquiredBucketWithReadOnlyReferenceToOriginal() { + Bucket acquired = singleBucketCache.addAndAcquire(archiveUri, "file", originalBucket); + assertTrue("Acquired bucket should be read-only", acquired.isReadOnly()); + assertFalse("Acquired bucket should not be freed", isFreed(acquired)); + assertEquals("Acquired bucket should reflect original content", originalContent, readFully(acquired)); + } + + @Test + public void acquireBucketWithReadOnlyReferenceToOriginal() { + singleBucketCache.addAndAcquire(archiveUri, "file", originalBucket).free(); + Bucket acquired = singleBucketCache.acquire(new FreenetURI("KSK", "archive"), "file"); + assertTrue("Acquired bucket should be read-only", acquired.isReadOnly()); + assertFalse("Acquired bucket should not be freed", isFreed(acquired)); + assertEquals("Acquired bucket should reflect original content", originalContent, readFully(acquired)); + } + + @Test + public void freeUnreferencedBucketWhenEvicted() { + // Acquire the first entry and free it immediately + Bucket aqcuired = singleBucketCache.addAndAcquire(archiveUri, "file", originalBucket); + aqcuired.free(); + assertFalse("Original bucket should not be freed", isFreed(originalBucket)); + + // Add another entry to cause eviction of the first + singleBucketCache.addAndAcquire(archiveUri, "other", new ArrayBucket()); + assertTrue("Original bucket should be freed", isFreed(originalBucket)); + } + + @Test + public void freeEvictedBucketWhenUnreferenced() { + // Acquire the first and then add another entry to cause eviction of the first + Bucket aqcuired = singleBucketCache.addAndAcquire(archiveUri, "file", originalBucket); + singleBucketCache.addAndAcquire(archiveUri, "other", new ArrayBucket()); + assertFalse("Original bucket should not be freed", isFreed(originalBucket)); + + // Free 
+       acquired.free();
+       assertTrue("Original bucket should be freed", isFreed(originalBucket));
+   }
+
+   @Test
+   public void freeEvictedBucketWhenAllUnreferenced() {
+       Bucket acquired1 = singleBucketCache.addAndAcquire(archiveUri, "file", originalBucket);
+       Bucket acquired2 = singleBucketCache.acquire(archiveUri, "file");
+       singleBucketCache.addAndAcquire(archiveUri, "other", new ArrayBucket());
+
+       acquired1.free();
+       assertFalse("Original bucket should not be freed", isFreed(originalBucket));
+
+       acquired2.free();
+       assertTrue("Original bucket should be freed", isFreed(originalBucket));
+   }
+
+   @Test
+   public void evictsLeastRecentlyUsed() {
+       ArchiveBucketCache threeBucketCache = new ArchiveBucketCache(3, Integer.MAX_VALUE);
+       threeBucketCache.addAndAcquire(archiveUri, "file-1", originalBucket);
+       threeBucketCache.addAndAcquire(archiveUri, "file-2", originalBucket);
+       threeBucketCache.addAndAcquire(archiveUri, "file-3", originalBucket);
+
+       threeBucketCache.acquire(archiveUri, "file-1"); // file-2 is now least recently used
+       threeBucketCache.addAndAcquire(archiveUri, "file-4", originalBucket); // trigger eviction by adding 4th item
+       assertNull("Entry file-2 should be evicted", threeBucketCache.acquire(archiveUri, "file-2"));
+       assertNotNull("Entry file-1 should be cached", threeBucketCache.acquire(archiveUri, "file-1"));
+       assertNotNull("Entry file-3 should be cached", threeBucketCache.acquire(archiveUri, "file-3"));
+       assertNotNull("Entry file-4 should be cached", threeBucketCache.acquire(archiveUri, "file-4"));
+   }
+
+   @Test
+   public void evictsBasedOnSize() {
+       ArchiveBucketCache threeByteCache = new ArchiveBucketCache(Integer.MAX_VALUE, 3);
+       threeByteCache.addAndAcquire(archiveUri, "small-file-1", new ArrayBucket(new byte[1]));
+       threeByteCache.addAndAcquire(archiveUri, "small-file-2", new ArrayBucket(new byte[1]));
+       threeByteCache.addAndAcquire(archiveUri, "small-file-3", new ArrayBucket(new byte[1]));
+       assertNotNull("Entry small-file-1 should be cached", threeByteCache.acquire(archiveUri, "small-file-1"));
+       assertNotNull("Entry small-file-2 should be cached", threeByteCache.acquire(archiveUri, "small-file-2"));
+       assertNotNull("Entry small-file-3 should be cached", threeByteCache.acquire(archiveUri, "small-file-3"));
+
+       threeByteCache.addAndAcquire(archiveUri, "large-file", new ArrayBucket(new byte[2]));
+       assertNotNull("Entry large-file should be cached", threeByteCache.acquire(archiveUri, "large-file"));
+       assertNotNull("Entry small-file-3 should be cached", threeByteCache.acquire(archiveUri, "small-file-3"));
+       assertNull("Entry small-file-1 should be evicted", threeByteCache.acquire(archiveUri, "small-file-1"));
+       assertNull("Entry small-file-2 should be evicted", threeByteCache.acquire(archiveUri, "small-file-2"));
+   }
+
+   private static String readFully(Bucket bucket) {
+       try (InputStream in = bucket.getInputStream()) {
+           return FileUtil.readUTF(in).toString();
+       } catch (IOException e) {
+           throw new RuntimeException("Could not read bucket contents", e);
+       }
+   }
+
+   private static boolean isFreed(Bucket bucket) {
+       try {
+           Field freed = bucket.getClass().getDeclaredField("freed");
+           freed.setAccessible(true);
+           return (boolean) freed.get(bucket);
+       } catch (Exception e) {
+           throw new RuntimeException("Could not read bucket freed status", e);
+       }
+   }
+}
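
Note on PATCH 2/6: a usage sketch of the new cache's ownership contract. This assumes caller code in the freenet.client package (ArchiveBucketCache is package-private) and made-up key and data values:

    FreenetURI uri = new FreenetURI("KSK", "archive");
    Bucket extracted = new ArrayBucket("entry data".getBytes(StandardCharsets.UTF_8));
    ArchiveBucketCache cache = new ArchiveBucketCache(256, 4 * 1024 * 1024);

    // The cache takes ownership of 'extracted'; the caller must not free it directly.
    Bucket reader = cache.addAndAcquire(uri, "index.html", extracted);
    try (InputStream in = reader.getInputStream()) {
        // Read the read-only view of the cached data.
    } finally {
        reader.free(); // every acquired reader must be freed exactly once
    }

    // Later lookups return null on a miss, or a fresh reader on a hit.
    Bucket again = cache.acquire(uri, "index.html");
    if (again != null) {
        again.free();
    }
    // The underlying bucket is only freed once the entry has been evicted
    // from the LRU and all outstanding readers have been freed.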
From b8158e75b99886bdb681a541430074dbd1572d09 Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:42 +0100
Subject: [PATCH 3/6] Remove unused code from ArchiveHandlerImpl
---
 src/freenet/client/ArchiveHandlerImpl.java | 34 ++++++----------------
 1 file changed, 9 insertions(+), 25 deletions(-)

diff --git a/src/freenet/client/ArchiveHandlerImpl.java b/src/freenet/client/ArchiveHandlerImpl.java
index 8535f145334..783a706e32a 100644
--- a/src/freenet/client/ArchiveHandlerImpl.java
+++ b/src/freenet/client/ArchiveHandlerImpl.java
@@ -31,37 +31,25 @@ class ArchiveHandlerImpl implements ArchiveHandler, Serializable {
    }

    @Override
-   public Bucket get(String internalName, ArchiveContext archiveContext,
-           ArchiveManager manager)
-           throws ArchiveFailureException, ArchiveRestartException,
-           MetadataParseException, FetchException {
-
-       if(forceRefetchArchive) return null;
-
-       Bucket data;
-
-       // Fetch from cache
-       if(logMINOR)
-           Logger.minor(this, "Checking cache: "+key+ ' ' +internalName);
-       if((data = manager.getCached(key, internalName)) != null) {
-           return data;
+   public Bucket get(String internalName, ArchiveContext archiveContext, ArchiveManager manager) {
+       if (forceRefetchArchive) {
+           return null;
        }
-
-       return null;
+       if (logMINOR) {
+           Logger.minor(this, "Checking cache: " + key + ' ' + internalName);
+       }
+       return manager.getCached(key, internalName);
    }

    @Override
-   public Bucket getMetadata(ArchiveContext archiveContext,
-           ArchiveManager manager) throws ArchiveFailureException,
-           ArchiveRestartException, MetadataParseException, FetchException {
+   public Bucket getMetadata(ArchiveContext archiveContext, ArchiveManager manager) {
        return get(".metadata", archiveContext, manager);
    }

    @Override
    public void extractToCache(Bucket bucket, ArchiveContext actx,
            String element, ArchiveExtractCallback callback,
-           ArchiveManager manager, ClientContext context) throws ArchiveFailureException,
-           ArchiveRestartException {
+           ArchiveManager manager, ClientContext context) throws ArchiveFailureException {
        forceRefetchArchive = false; // now we don't need to force refetch any more
        manager.extractToCache(key, archiveType, compressorType, bucket, actx, element, callback, context);
    }
@@ -71,10 +59,6 @@ public ARCHIVE_TYPE getArchiveType() {
        return archiveType;
    }

-   public COMPRESSOR_TYPE getCompressorType() {
-       return compressorType;
-   }
-
    @Override
    public FreenetURI getKey() {
        return key;

From 7f7ce7c72cd6bdd5f06dea2c3e07ff609f67189f Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:43 +0100
Subject: [PATCH 4/6] Deprecate use of ArchiveRestartException now that it is
 no longer thrown
---
 src/freenet/client/ArchiveExtractCallback.java  |  4 +++-
 src/freenet/client/ArchiveRestartException.java |  1 +
 src/freenet/client/async/SingleFileFetcher.java | 12 ------------
 3 files changed, 4 insertions(+), 13 deletions(-)

diff --git a/src/freenet/client/ArchiveExtractCallback.java b/src/freenet/client/ArchiveExtractCallback.java
index cd5e44d58ed..55e2e43d5a0 100644
--- a/src/freenet/client/ArchiveExtractCallback.java
+++ b/src/freenet/client/ArchiveExtractCallback.java
@@ -17,7 +17,9 @@ public interface ArchiveExtractCallback extends Serializable {
    public void notInArchive(ClientContext context);

    /** Failed: restart */
-   public void onFailed(ArchiveRestartException e, ClientContext context);
+   @Deprecated
+   default void onFailed(ArchiveRestartException e, ClientContext context) {
+   }

    /** Failed for some other reason */
    public void onFailed(ArchiveFailureException e, ClientContext context);
diff --git a/src/freenet/client/ArchiveRestartException.java b/src/freenet/client/ArchiveRestartException.java
index 4bb039a429e..2bffb3d4b7b 100644
--- a/src/freenet/client/ArchiveRestartException.java
+++ b/src/freenet/client/ArchiveRestartException.java
@@ -12,6 +12,7 @@ public class ArchiveRestartException extends Exception {

    private static final long serialVersionUID = -7670838856130773012L;

+   @Deprecated
    public ArchiveRestartException(String msg) {
        super(msg);
    }
diff --git a/src/freenet/client/async/SingleFileFetcher.java b/src/freenet/client/async/SingleFileFetcher.java
index 536db1880d2..3ace242212b 100644
--- a/src/freenet/client/async/SingleFileFetcher.java
+++ b/src/freenet/client/async/SingleFileFetcher.java
@@ -478,10 +478,6 @@ public void notInArchive(ClientContext context) {
                    onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), false, context);
                }
                @Override
-               public void onFailed(ArchiveRestartException e, ClientContext context) {
-                   SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
-               }
-               @Override
                public void onFailed(ArchiveFailureException e, ClientContext context) {
                    SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
                }
@@ -544,10 +540,6 @@ public void notInArchive(ClientContext context) {
                    onFailure(new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE), false, context);
                }
                @Override
-               public void onFailed(ArchiveRestartException e, ClientContext context) {
-                   SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
-               }
-               @Override
                public void onFailed(ArchiveFailureException e, ClientContext context) {
                    SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
                }
@@ -612,10 +604,6 @@ public void notInArchive(ClientContext context) {
                    onFailure(new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE), false, context);
                }
                @Override
-               public void onFailed(ArchiveRestartException e, ClientContext context) {
-                   SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
-               }
-               @Override
                public void onFailed(ArchiveFailureException e, ClientContext context) {
                    SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
                }
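
Note on PATCH 4/6: with the deprecated default method in place, a new ArchiveExtractCallback implementation only overrides the cases that can still occur. A sketch, assuming the interface declares no abstract methods beyond those visible in these patches:

    ArchiveExtractCallback callback = new ArchiveExtractCallback() {
        @Override
        public void gotBucket(Bucket data, ClientContext context) {
            // Consume the extracted element.
        }
        @Override
        public void notInArchive(ClientContext context) {
            // The requested element was not present in the archive.
        }
        @Override
        public void onFailed(ArchiveFailureException e, ClientContext context) {
            // Extraction failed.
        }
        // onFailed(ArchiveRestartException, ClientContext) no longer needs an
        // override: the deprecated no-op default applies, and the exception is
        // never thrown anyway.
    };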
From f5f195a48cf6e0bc940ffc89100dbd5782a557b5 Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:43 +0100
Subject: [PATCH 5/6] Reduce paranoia around fields being null in
 MultiReaderBucket
---
 src/freenet/support/io/MultiReaderBucket.java | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/src/freenet/support/io/MultiReaderBucket.java b/src/freenet/support/io/MultiReaderBucket.java
index 1270015c862..cd8559d95d9 100644
--- a/src/freenet/support/io/MultiReaderBucket.java
+++ b/src/freenet/support/io/MultiReaderBucket.java
@@ -9,6 +9,7 @@
 import java.io.OutputStream;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Objects;

 import freenet.client.async.ClientContext;
 import freenet.support.LogThresholdCallback;
@@ -24,14 +25,12 @@
  * @author toad
  */
 public class MultiReaderBucket implements Serializable {
-
-   private static final long serialVersionUID = 1L;
-   private final Bucket bucket;
-
-   // Assume there will be relatively few readers
-   private ArrayList<Bucket> readers;
-
+   private static final long serialVersionUID = 1L;
+
+   private final Bucket bucket;
+   private final ArrayList<Bucket> readers = new ArrayList<>();
+
    private boolean closed;

    private static volatile boolean logMINOR;
@@ -46,24 +45,16 @@ public void shouldUpdate() {
    }

    public MultiReaderBucket(Bucket underlying) {
+       Objects.requireNonNull(underlying);
        bucket = underlying;
    }
-
-   protected MultiReaderBucket() {
-       // For serialization.
-       bucket = null;
-   }

    /** Get a reader bucket */
    public Bucket getReaderBucket() {
        synchronized(this) {
            if(closed) return null;
            Bucket d = new ReaderBucket();
-           if (readers == null)
-               readers = new ArrayList<Bucket>(1);
            readers.add(d);
-           if(logMINOR)
-               Logger.minor(this, "getReaderBucket() returning "+d+" for "+this+" for "+bucket);
            return d;
        }
    }
@@ -82,7 +73,6 @@ public void free() {
            freed = true;
            ListUtils.removeBySwapLast(readers, this);
            if(!readers.isEmpty()) return;
-           readers = null;
            if(closed) return;
            closed = true;
        }

From 6402dbc8aa027d66a8388fa9d4a4eef3798477f2 Mon Sep 17 00:00:00 2001
From: Bert Massop
Date: Sun, 1 Dec 2024 17:16:43 +0100
Subject: [PATCH 6/6] Use log level callback to populate logMINOR in
 ArchiveManager
---
 src/freenet/client/ArchiveManager.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/freenet/client/ArchiveManager.java b/src/freenet/client/ArchiveManager.java
index 7588a8abd49..0b10e71f0f0 100644
--- a/src/freenet/client/ArchiveManager.java
+++ b/src/freenet/client/ArchiveManager.java
@@ -20,7 +20,6 @@
 import freenet.keys.FreenetURI;
 import freenet.support.ExceptionWrapper;
 import freenet.support.Logger;
-import freenet.support.Logger.LogLevel;
 import freenet.support.MutableBoolean;
 import freenet.support.api.Bucket;
 import freenet.support.api.BucketFactory;
@@ -44,7 +43,11 @@ public class ArchiveManager {

    public static final String METADATA_NAME = ".metadata";
-   private static boolean logMINOR;
+
+   private static volatile boolean logMINOR;
+   static {
+       Logger.registerClass(ArchiveManager.class);
+   }

    public enum ARCHIVE_TYPE {
        // WARNING: This enum is persisted. Changing member names may break downloads/uploads.
@@ -125,7 +128,6 @@ public ArchiveManager(int unused, long maxCachedData, long maxArchivedFileSize,
        this.cache = new ArchiveBucketCache(maxCachedElements, maxCachedData);
        this.maxArchivedFileSize = maxArchivedFileSize;
        this.tempBucketFactory = tempBucketFactory;
-       logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
    }

    /**
@@ -165,8 +167,6 @@ public Bucket getCached(FreenetURI key, String filename) {
     * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
     */
    void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, String element, ArchiveExtractCallback callback, ClientContext context) throws ArchiveFailureException {
-       logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
-
        MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

        if(logMINOR) Logger.minor(this, "Extracting "+key);
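
Note on PATCH 5/6: the reader-counting contract that both MultiReaderBucket's callers and the new ArchiveBucketCache rely on, in sketch form with a hypothetical underlying bucket:

    Bucket underlying = new ArrayBucket("shared data".getBytes(StandardCharsets.UTF_8));
    MultiReaderBucket shared = new MultiReaderBucket(underlying);

    Bucket r1 = shared.getReaderBucket(); // read-only view, registered as a reader
    Bucket r2 = shared.getReaderBucket();

    r1.free(); // underlying data stays alive: r2 is still registered
    r2.free(); // last reader freed: the underlying bucket is freed too

    Bucket r3 = shared.getReaderBucket(); // returns null: the bucket is now closed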