From ca870828b81ed05ab5d902264059ac0b42055d80 Mon Sep 17 00:00:00 2001 From: Radek Felcman Date: Thu, 16 May 2024 11:18:06 +0200 Subject: [PATCH] CacheKey NPE and DeadLock Signed-off-by: Radek Felcman --- .../ReadLockAcquisitionMetadataTest.java | 196 ++ .../config/PersistenceUnitProperties.java | 46 +- .../internal/helper/ConcurrencyManager.java | 41 +- .../internal/helper/ConcurrencyUtil.java | 295 ++- .../helper/ConcurrencyUtil.java.ORIGIN | 1692 +++++++++++++++++ .../internal/helper/ReadLockManager.java | 137 +- .../helper/type/ConcurrencyManagerState.java | 33 +- .../internal/identitymaps/CacheKey.java | 163 +- .../identitymaps/CacheKey.java.ORIGIN | 607 ++++++ .../i18n/TraceLocalizationResource.java | 26 +- .../TraceLocalizationResource.java.ORIGIN | 557 ++++++ .../internal/sessions/AbstractSession.java | 371 +++- .../internal/jpa/EntityManagerSetupImpl.java | 18 + 13 files changed, 4122 insertions(+), 60 deletions(-) create mode 100644 foundation/eclipselink.core.test/src/org/eclipse/persistence/testing/tests/junit/helper/ReadLockAcquisitionMetadataTest.java create mode 100644 foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java.ORIGIN create mode 100644 foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java.ORIGIN create mode 100644 foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java.ORIGIN diff --git a/foundation/eclipselink.core.test/src/org/eclipse/persistence/testing/tests/junit/helper/ReadLockAcquisitionMetadataTest.java b/foundation/eclipselink.core.test/src/org/eclipse/persistence/testing/tests/junit/helper/ReadLockAcquisitionMetadataTest.java new file mode 100644 index 00000000000..254d0bfbae4 --- /dev/null +++ b/foundation/eclipselink.core.test/src/org/eclipse/persistence/testing/tests/junit/helper/ReadLockAcquisitionMetadataTest.java @@ -0,0 +1,196 @@ +/* + * ----------------------------------------------------------------------------- + * manually + * Removed text manually + * Removed text manually + * ----------------------------------------------------------------------------- + */ +package org.eclipse.persistence.testing.tests.junit.helper; + +import java.util.List; + +import org.eclipse.persistence.internal.helper.ConcurrencyManager; +import org.eclipse.persistence.internal.helper.ReadLockManager; +import org.eclipse.persistence.internal.helper.type.ReadLockAcquisitionMetadata; +import org.eclipse.persistence.internal.identitymaps.CacheKey; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Unit test for the class {@link org.eclipse.persistence.internal.helper.ReadLockManager} + */ +public class ReadLockAcquisitionMetadataTest { + + // Inner class + /** + * Unit test extension class to allows to create some hacky methods to access + * and manipulate the state of the read lock manager. + * Such as adding NULL records into the state. + */ + public static class ReadLockManagerExtension extends ReadLockManager{ + + /** + * This is a dummy method hack our state maps with null entries. + * Our expectation is that this should never happen in real life + * but we have seen null pointer exceptions that seem to indicate that this is factuallz posisbly to happen + * although we have understoor the underlying reason for the null entries. + * + * With this method we simulate corrupting our read data structures with null entries. 
+ */ + public void hackReadLockManagerToBeCorruptedWithNullRecordsItsState() { + ConcurrencyManager cacheKeyNull = null; + readLocks.add(cacheKeyNull); + final Thread currentThread = Thread.currentThread(); + final long currentThreadId = currentThread.getId(); + ReadLockAcquisitionMetadata readLockAcquisitionMetadataNull = null; + List readLocksAcquiredDuringCurrentThreadList = mapThreadToReadLockAcquisitionMetadata.get(currentThreadId); + readLocksAcquiredDuringCurrentThreadList.add(readLockAcquisitionMetadataNull); + + } + + + } + // helpver variables + + final ConcurrencyManager cacheKeyA = new CacheKey("cacheKeyA"); + final ConcurrencyManager cacheKeyB = new CacheKey("cacheKeyB"); + + + @Before + public void before() { + + } + + + @Test + public void normalHappyPathLogicAddingAndRemovingMetadataIntoTheReadLockManager() { + // SETUP: + // basic variable initialization + ReadLockManagerExtension testee = new ReadLockManagerExtension(); + + + // EXECUTE 1 - Add cache key A + testee.addReadLock(cacheKeyA); + + // VERIFY 1 + + Assert.assertEquals(1, testee.getReadLocks().size()); + Assert.assertTrue(testee.getReadLocks().contains(cacheKeyA)); + Assert.assertFalse(testee.getReadLocks().contains(cacheKeyB)); + + Assert.assertEquals(1, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + List cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(1, cacheKeyMetadataForCurrentThread.size()); + + Assert.assertTrue(cacheKeyA == cacheKeyMetadataForCurrentThread.get(0).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + Assert.assertFalse(cacheKeyB == cacheKeyMetadataForCurrentThread.get(0).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + + + // EXECUTE 2 - Add cache key B + testee.addReadLock(cacheKeyB); + + // VERIFY 2 + + Assert.assertEquals(2, testee.getReadLocks().size()); + Assert.assertTrue(testee.getReadLocks().contains(cacheKeyA)); + Assert.assertTrue(testee.getReadLocks().contains(cacheKeyB)); + + Assert.assertEquals(1, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(2, cacheKeyMetadataForCurrentThread.size()); + + // note: when we are adding, we are adding the entries to the HEAD of the list + Assert.assertTrue(cacheKeyB == cacheKeyMetadataForCurrentThread.get(0).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + Assert.assertTrue(cacheKeyA == cacheKeyMetadataForCurrentThread.get(1).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + + + // EXECUTE 3 - Remove cache keys + testee.removeReadLock(cacheKeyA); + + // VERIFY 3 + Assert.assertEquals(1, testee.getReadLocks().size()); + Assert.assertFalse(testee.getReadLocks().contains(cacheKeyA)); + Assert.assertTrue(testee.getReadLocks().contains(cacheKeyB)); + + Assert.assertEquals(1, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(1, cacheKeyMetadataForCurrentThread.size()); + + Assert.assertTrue(cacheKeyB == cacheKeyMetadataForCurrentThread.get(0).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + Assert.assertFalse(cacheKeyA == cacheKeyMetadataForCurrentThread.get(0).getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()); + + // EXECUTE 4 - Remove cache keys + testee.removeReadLock(cacheKeyB); + + // 
VERIFY 4 + Assert.assertEquals(0, testee.getReadLocks().size()); + Assert.assertFalse(testee.getReadLocks().contains(cacheKeyA)); + Assert.assertFalse(testee.getReadLocks().contains(cacheKeyB)); + Assert.assertEquals(0, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + + + } + + @Test + public void testAddNullReadCacheKeyDoesNothing() { + // SETUP: + // basic variable initialization + ReadLockManagerExtension testee = new ReadLockManagerExtension(); + ConcurrencyManager cacheKeyNull = null; + + // EXECUTE + // try to add a null cache key to the map + testee.addReadLock(cacheKeyNull); + + // VERIFY + Assert.assertEquals(0, testee.getReadLocks().size()); + Assert.assertEquals(0, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + } + + /** + * The purpose of this unit test is to make sure that if for some unknown reason + * we ever end up adding NULL as metadata to either the READ Lock manager + * or to the VECTOR of read locks + * that we can self heald and remove them from the map automatically. + * + */ + @Test + public void testRemoveWhen_mapThreadToReadLockAcquisitionMetadata_containsNull() { + // SETUP: + // let us start by adding some entrires to the read lock manager + ReadLockManagerExtension testee = new ReadLockManagerExtension(); + testee.addReadLock(cacheKeyA); + testee.addReadLock(cacheKeyB); + Assert.assertEquals(2, testee.getReadLocks().size()); + Assert.assertEquals(1, testee.getMapThreadToReadLockAcquisitionMetadata().size()); + List cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(2, cacheKeyMetadataForCurrentThread.size()); + + // SETUP: + // now we are going to hack our state to put in here null entires both in the read locks map and in the list of metadata + // Validate that that the maps are now properly hacked + testee.hackReadLockManagerToBeCorruptedWithNullRecordsItsState(); + Assert.assertEquals(3, testee.getReadLocks().size()); + Assert.assertTrue( testee.getReadLocks().contains(null)); + cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(3, cacheKeyMetadataForCurrentThread.size()); + Assert.assertTrue( cacheKeyMetadataForCurrentThread.contains(null)); + + // EXECUTE + // Now we will try to REMOVE from the read lock manager a cache key that does not actually exist in the map + // this MUST have the side effect of causing the code to loop over ALL OF THE read lock manager metadata and spot that we + // have NULL records in the metadata array + // in so doing the code should self-heal and get rid of all that garbage + ConcurrencyManager cacheKeyNotExistingInTheReadLockManagerToCallFullLoopOverData = new CacheKey("cacheKeyNotExistingInTheReadLockManagerToCallFullLoopOverData"); + testee.removeReadLock(cacheKeyNotExistingInTheReadLockManagerToCallFullLoopOverData); + + // VERIFY that our code self healeded + Assert.assertEquals(2, testee.getReadLocks().size()); + Assert.assertFalse( testee.getReadLocks().contains(null)); + cacheKeyMetadataForCurrentThread =testee.getMapThreadToReadLockAcquisitionMetadata().get(Thread.currentThread().getId()); + Assert.assertEquals(2, cacheKeyMetadataForCurrentThread.size()); + Assert.assertFalse( cacheKeyMetadataForCurrentThread.contains(null)); + + } +} diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/config/PersistenceUnitProperties.java 
b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/config/PersistenceUnitProperties.java index 27ae1f4008b..9c54147e06d 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/config/PersistenceUnitProperties.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/config/PersistenceUnitProperties.java @@ -55,9 +55,11 @@ import java.io.File; import java.sql.DriverManager; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -3837,7 +3839,7 @@ public class PersistenceUnitProperties { * An NoSQL datasource is a non-relational datasource such as a legacy database, NoSQL database, * XML database, transactional and messaging systems, or ERP systems. * - * @see javax.resource.cci.ConnectionFactory + * {@code javax.resource.cci.ConnectionFactory} */ public static final String NOSQL_CONNECTION_FACTORY = "eclipselink.nosql.connection-factory"; @@ -4051,6 +4053,48 @@ public class PersistenceUnitProperties { */ public static final String CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION = "eclipselink.concurrency.manager.allow.interruptedexception"; + + /** + * This is the persistence.xml property that tells us how the + * org.eclipse.persistence.internal.sessions.AbstractSession.getCacheKeyFromTargetSessionForMergeScenarioMergeManagerNotNullAndCacheKeyOriginalStillNull(CacheKey, Object, ObjectBuilder, ClassDescriptor, MergeManager) + * should behave. + */ + public static final String CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION = "eclipselink.concurrency.manager.allow.abstractsession.getCacheKeyFromTargetSessionForMerge.modeofoperation"; + + + //TODO RFELCMAN enum - begin? + /** + * This should be the worst possible option. This is the one that gives us the issue: + * https://github.com/eclipse-ee4j/eclipselink/issues/2094. + */ + public static final String CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_BACKWARDS_COMPATIBILITY = "backwards-compatibility"; + /** + * This is far from an ideal approach but already better than the old backwards-compatibility approach. + * In this code path what will happen is that our merge manager thread will be doing a loop, hoping that a cache key + * acquired by some other thread is released. + * If that never happens then our merge manager will blow up. + * The assumption being that it might itself be the holder of cache keys that are stopping other processes from going forward. + * That is what we saw in the issue https://github.com/eclipse-ee4j/eclipselink/issues/2094 + * where our merge manager thread was holding several write lock keys that were making it impossible for other threads doing object building + * to finish their work. + */ + public static final String CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_WAIT_LOOP_WITH_TIME_LIMIT = "wait-loop-with-time-limit"; + //TODO RFELCMAN enum - end?
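+ // Hedged usage sketch (illustration only, not part of this patch; "my-persistence-unit" and the props map are placeholders): + // the mode-of-operation property above is supplied like any other persistence unit property, for example + // Map<String, Object> props = new HashMap<>(); + // props.put(PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION, + // PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_WAIT_LOOP_WITH_TIME_LIMIT); + // EntityManagerFactory emf = Persistence.createEntityManagerFactory("my-persistence-unit", props); + // or equivalently as a persistence.xml entry: + // <property name="eclipselink.concurrency.manager.allow.abstractsession.getCacheKeyFromTargetSessionForMerge.modeofoperation" value="wait-loop-with-time-limit"/>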
+ + + public static final String CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_DEFAULT = CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_WAIT_LOOP_WITH_TIME_LIMIT; + + /** + * The supported options for the {@link org.eclipse.persistence.config.PersistenceUnitProperties#CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION} + * persistence unit property. + */ + public static final List CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_LIST_OF_ALL_OPTIONS = Collections.unmodifiableList( Arrays.asList( + PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_BACKWARDS_COMPATIBILITY + , PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_WAIT_LOOP_WITH_TIME_LIMIT + )); + + + /** *

* This property control (enable/disable) if ConcurrencyException fired when dead-lock diagnostic is enabled. diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyManager.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyManager.java index 5d163dbd216..9bd8aa7a090 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyManager.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyManager.java @@ -18,7 +18,17 @@ import java.io.Serializable; import java.io.StringWriter; import java.security.AccessController; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -853,7 +863,7 @@ public void releaseAllLocksAcquiredByThread(DeferredLockManager lockManager) { * @return Never null if the read lock manager does not yet exist for the current thread. otherwise its read log * manager is returned. */ - protected static ReadLockManager getReadLockManager(Thread thread) { + public static ReadLockManager getReadLockManager(Thread thread) { Map readLockManagers = getReadLockManagers(); return readLockManagers.get(thread); } @@ -1107,4 +1117,31 @@ public Lock getInstanceLock() { public Condition getInstanceLockCondition() { return this.instanceLockCondition; } + + /** + * We check if the cache key is currently owned for writing and whether the owning thread happens to be the current thread doing the check. + * @return FALSE means either that the cache key is currently not owned by anybody for writing purposes, or that it is owned + * by the current thread itself. TRUE is returned if and only if the cache key is being owned by some thread + * and that thread is not the current thread, it is some other competing thread.
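+ * <p>Hypothetical usage sketch (illustration only, not part of this patch; the cacheKey reference is assumed to be at hand): + * <pre>{@code + * ConcurrencyManager cacheKey = ...; + * if (cacheKey.isAcquiredForWritingAndOwneddByADifferentThread()) { + * // a competing thread holds the write lock; defer or keep waiting instead of assuming ownership + * } + * }</pre>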
+ */ + public synchronized boolean isAcquiredForWritingAndOwneddByADifferentThread() { + // (a) We start by using the traditional acquire implementation to check if cache key is acquired + // if the output says false we immediately return false + if(!this.isAcquired()) { + return false; + } + + // (b) If the active thread is not set then the cache keys is not acquired for writing by anybody + if(this.activeThread == null) { + return false; + } + + + + // (c) some thread is acquiring the cache key + // return need to check if it is a different thread than ours + Thread currentThread = Thread.currentThread(); + return this.activeThread != currentThread; + + } } diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java index 1e93eac323e..d56e6d78afd 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java @@ -15,6 +15,24 @@ // IBM - ConcurrencyUtil call of ThreadMXBean.getThreadInfo() needs doPriv package org.eclipse.persistence.internal.helper; +import java.io.StringWriter; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.security.AccessController; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.Vector; +import java.util.concurrent.atomic.AtomicLong; + +import org.eclipse.persistence.config.PersistenceUnitProperties; import org.eclipse.persistence.config.SystemProperties; import org.eclipse.persistence.internal.helper.type.CacheKeyToThreadRelationships; import org.eclipse.persistence.internal.helper.type.ConcurrencyManagerState; @@ -25,21 +43,24 @@ import org.eclipse.persistence.internal.security.PrivilegedAccessHelper; import org.eclipse.persistence.internal.security.PrivilegedGetSystemProperty; import org.eclipse.persistence.internal.security.PrivilegedGetThreadInfo; +import org.eclipse.persistence.internal.sessions.AbstractSession; import org.eclipse.persistence.logging.AbstractSessionLog; import org.eclipse.persistence.logging.SessionLog; -import java.io.StringWriter; -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.security.AccessController; -import java.util.*; -import java.util.concurrent.atomic.AtomicLong; - public class ConcurrencyUtil { public static final ConcurrencyUtil SINGLETON = new ConcurrencyUtil(); + /** + * The best default acquire wait time + * {@code property name="eclipselink.concurrency.manager.waittime" value="10"} + * + * is around 10 ms. + * + *The default of 0, meaning eternal wait, is not a good choice. + *This should most definitely change. + */ + //TODO RFELCMAN change default behaviour????? 
private static final long DEFAULT_ACQUIRE_WAIT_TIME = 0L; private static final long DEFAULT_BUILD_OBJECT_COMPLETE_WAIT_TIME = 0L; private static final long DEFAULT_MAX_ALLOWED_SLEEP_TIME_MS = 40000L; @@ -61,6 +82,45 @@ public class ConcurrencyUtil { private long maxAllowedFrequencyToProduceTinyDumpLogMessage = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_MAX_FREQUENCY_DUMP_TINY_MESSAGE, DEFAULT_MAX_ALLOWED_FREQUENCY_TINY_DUMP_LOG_MESSAGE); private long maxAllowedFrequencyToProduceMassiveDumpLogMessage = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_MAX_FREQUENCY_DUMP_MASSIVE_MESSAGE, DEFAULT_MAX_ALLOWED_FREQUENCY_MASSIVE_DUMP_LOG_MESSAGE); private boolean allowInterruptedExceptionFired = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION, DEFAULT_INTERRUPTED_EXCEPTION_FIRED); + + /** + * See https://github.com/eclipse-ee4j/eclipselink/issues/2094 + * {@code org.eclipse.persistence.internal.sessions.AbstractSession.getCacheKeyFromTargetSessionForMerge(Object, ObjectBuilder, ClassDescriptor, MergeManager)} + * + * + * We use this flag if the merge manager is stuck waiting to get a + * cache key with the original object populated. + * + * Since this is a relatively rare dead lock pattern, instead of using the + * {@link org.eclipse.persistence.config.PersistenceUnitProperties#CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION} + * which should normally be the case, we just use this hard-coded constant. + * Why? Because by default we want to have org.eclipse.persistence.config.PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION set to FALSE + * (do not blow up with exceptions) + * and we prefer to allow the blow-up only in very rare exclusive code areas. + * + * + *

Important: there is a second parameter, the {@link org.eclipse.persistence.config.PersistenceUnitProperties#CONCURRENCY_MANAGER_ALLOW_CONCURRENCY_EXCEPTION}; + * this is associated with the {@link org.eclipse.persistence.internal.helper.ConcurrencyUtil#isAllowConcurrencyExceptionToBeFiredUp()}. + * + * That configuration parameter should be set to true if we want blow-ups to be fired when we call the concurrency util. + * + * see: org.eclipse.persistence.config.PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION (allow exception to be fired up) + * + */ + private boolean allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception = Boolean.TRUE; + + /** + * What sort of approach should be used in the {@code org.eclipse.persistence.internal.sessions.AbstractSession.getCacheKeyFromTargetSessionForMerge(Object, ObjectBuilder, ClassDescriptor, MergeManager)} + * due to all the deadlock problems we have encountered in the issue: + * https://github.com/eclipse-ee4j/eclipselink/issues/2094. + * + */ + private String abstractSessionModeOfOperationOfMergeManagerGetCacheKey = org.eclipse.persistence.config.PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_DEFAULT; + private boolean allowConcurrencyExceptionToBeFiredUp = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_CONCURRENCY_EXCEPTION, DEFAULT_CONCURRENCY_EXCEPTION_FIRED); private boolean allowTakingStackTraceDuringReadLockAcquisition = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_STACK_TRACE_READ_LOCK, DEFAULT_TAKING_STACKTRACE_DURING_READ_LOCK_ACQUISITION); @@ -71,6 +131,9 @@ public class ConcurrencyUtil { private long concurrencySemaphoreMaxTimePermit = getLongProperty(SystemProperties.CONCURRENCY_SEMAPHORE_MAX_TIME_PERMIT, DEFAULT_CONCURRENCY_SEMAPHORE_MAX_TIME_PERMIT); private long concurrencySemaphoreLogTimeout = getLongProperty(SystemProperties.CONCURRENCY_SEMAPHORE_LOG_TIMEOUT, DEFAULT_CONCURRENCY_SEMAPHORE_LOG_TIMEOUT); + public static final boolean WE_BELIEVE_TO_BE_IN_A_DEADLOCK_TRUE = true; + public static final boolean WE_BELIEVE_TO_BE_IN_A_DEADLOCK_FALSE = false; + /** * Thread local variable that allows the current thread to know when was the last time that this specific thread * produced the "tiny dump" indicating that the thread is stuck. @@ -124,8 +187,13 @@ private ConcurrencyUtil() { * @throws InterruptedException * we fire an interrupted exception to ensure that the code blows up and releases all of the locks it * had. + * + * + * @return a boolean value indicating whether the code believes that it is stuck. TRUE means the code ended up either logging a Tiny Dump message or the massive dump message. FALSE means that the code + * did not do any logging and just quickly returned. This boolean flag is especially meaningful if we have the interrupted exceptions disabled + * and so the method is not being caused to blow up. In that case there is still a way to know if we believe to be stuck.
*/ - public void determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManager concurrencyManager, + public boolean determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManager concurrencyManager, final long whileStartTimeMillis, DeferredLockManager lockManager, ReadLockManager readLockManager, boolean callerIsWillingToAllowInterruptedExceptionToBeFiredUpIfNecessary) throws InterruptedException { @@ -138,7 +206,7 @@ public void determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManag if (!tooMuchTimeHasElapsed) { // this thread is not stuck for that long let us allow the code to continue waiting for the lock to be acquired // or for the deferred locks to be considered as finished - return; + return WE_BELIEVE_TO_BE_IN_A_DEADLOCK_FALSE; } // (b) We believe this is a dead lock @@ -155,7 +223,8 @@ public void determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManag // this thread has recently logged a small message about the fact that it is stuck // no point in logging another message like that for some time // let us allow for this thread to silently continue stuck without logging anything - return ; + // note: we might not have produced a tiny message to not spam the log but we still believe to be in a deadlock at this point + return WE_BELIEVE_TO_BE_IN_A_DEADLOCK_TRUE; } // (c) This thread it is dead lock since the whileStartDate indicates a dead lock and @@ -197,6 +266,7 @@ public void determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManag } else { AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE,"concurrency_manager_allow_concurrency_exception_fired_up"); } + return WE_BELIEVE_TO_BE_IN_A_DEADLOCK_TRUE; } /** @@ -266,6 +336,10 @@ public void setMaxAllowedFrequencyToProduceMassiveDumpLogMessage(long maxAllowed this.maxAllowedFrequencyToProduceMassiveDumpLogMessage = maxAllowedFrequencyToProduceMassiveDumpLogMessage; } + /** + * See see {@link org.eclipse.persistence.config.PersistenceUnitProperties#CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION} + * @return + */ public boolean isAllowInterruptedExceptionFired() { return this.allowInterruptedExceptionFired; } @@ -275,6 +349,47 @@ public void setAllowInterruptedExceptionFired(boolean allowInterruptedExceptionF } /** + * see {@link #allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception} + * + */ + public boolean isAllowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception() { + return allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception; + } + /** + * see {@link #allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception} + */ + public void setAllowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception(boolean allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception) { + this.allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception = allowAbstractSessionGetCacheKeyFromTargetSessionForMergeInterruptedexception; + } + + public void setAbstractSessionModeOfOperationOfMergeManagerGetCacheKey(String abstractSessionModeOfOperationOfMergeManagerGetCacheKey) { + if(PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_LIST_OF_ALL_OPTIONS.contains(abstractSessionModeOfOperationOfMergeManagerGetCacheKey)) { + this.abstractSessionModeOfOperationOfMergeManagerGetCacheKey = abstractSessionModeOfOperationOfMergeManagerGetCacheKey; + } else { + 
this.abstractSessionModeOfOperationOfMergeManagerGetCacheKey = PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_DEFAULT; + } + } + + public boolean isAbstractSessionModeOfOperationOfMergeManagerGetCacheKeyBackwardsCompatibility() { + return PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_BACKWARDS_COMPATIBILITY + .equals(this.abstractSessionModeOfOperationOfMergeManagerGetCacheKey); + } + + /** + * Instead of doing the old approach of an eternal infinite wait, the code can do a timed wait and if it believes it is stuck for too long, + * just go out of the method returning the cache key. The code above the abstract session, + * namely the method {@code org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(Object, ObjectChangeSet, ClassDescriptor, AbstractSession, UnitOfWorkImpl)} + * seems to have all manner of tricks up its sleeve to try to deal with a cacheKey whose original is still empty. + */ + public boolean isAbstractSessionModeOfOperationOfMergeManagerGetCacheKeyWaitLoopWithTimeLimit() { + return PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION_OPTION_WAIT_LOOP_WITH_TIME_LIMIT + .equals(this.abstractSessionModeOfOperationOfMergeManagerGetCacheKey); + } + + + + /** + * see {@link org.eclipse.persistence.config.PersistenceUnitProperties#CONCURRENCY_MANAGER_ALLOW_CONCURRENCY_EXCEPTION} * @return true if we are supposed to be firing up exception to abort the thread in a dead lock, false we are afraid * of trying to abort the transaction and not managing to resolve the dead lock and prefer to system frozen * and be forced into restarting it. @@ -355,20 +470,48 @@ public String createToStringExplainingOwnedCacheKey(ConcurrencyManager concurren Date concurrencyManagerCreationDate = concurrencyManager.getConcurrencyManagerCreationDate(); if (concurrencyManager instanceof CacheKey) { CacheKey cacheKey = (CacheKey) concurrencyManager; - Object primaryKey = cacheKey.getKey(); + // Patching the code as part of + // TRK-32315 + // where we try to port the improvements of the hacked eclipselink 2.6.5 to prevent null pointer exceptions + // when generating the massive dump + // see also the tracker: TRK-25534 + // and the original patching commit: + // eclipselink/eclipselink-2.6.5-wls-12.2.1.3/-/commit/f0f9391567333af43465892430581802ac151d1f + + // Object primaryKey = cacheKey.getKey(); + Object primaryKey = cacheKey.getKeyEnsuredToBeNotNull(); + Object primaryKeyAtTimeOfCreation = cacheKey.getPrimaryKeyAtTimeOfCreation(); + long cacheKeyInstanceCreatonNumber = cacheKey.getCacheKeyInstanceCreationNumber(); + + Object cacheKeyObject = cacheKey.getObject(); String canonicalName = cacheKeyObject != null ?
cacheKeyObject.getClass().getCanonicalName() : TraceLocalization.buildMessage("concurrency_util_owned_cache_key_null"); - return TraceLocalization.buildMessage("concurrency_util_owned_cache_key_is_cache_key", new Object[] {canonicalName, primaryKey, - cacheKeyObject, String.valueOf(System.identityHashCode(cacheKeyObject)), + return TraceLocalization.buildMessage("concurrency_util_owned_cache_key_is_cache_key" + , new Object[] { + // 0, 1 + canonicalName, primaryKey, + //2 + String.valueOf(System.identityHashCode(cacheKeyObject)), + + // 3,4 cacheKeyClass, String.valueOf(System.identityHashCode(cacheKey)), + // 5,6,7 activeThread, concurrencyManager.getNumberOfReaders(), concurrencyManagerId, + // 8 ConversionManager.getDefaultManager().convertObject(concurrencyManagerCreationDate, String.class).toString(), - // metadata of number of times the cache key suffered increases in number readers + + // 9 metadata of number of times the cache key suffered increases in number readers cacheKey.getTotalNumberOfKeysAcquiredForReading(), + // 10 cacheKey.getTotalNumberOfKeysReleasedForReading(), + // 11 cacheKey.getTotalNumberOfKeysReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero(), - concurrencyManager.getDepth()}); + // 12 + concurrencyManager.getDepth() + // + // parameters 13, 14 + , cacheKeyInstanceCreatonNumber, primaryKeyAtTimeOfCreation + }); } else { return TraceLocalization.buildMessage("concurrency_util_owned_cache_key_is_not_cache_key", new Object[] {cacheKeyClass, concurrencyManager, activeThread, concurrencyManagerId, ConversionManager.getDefaultManager().convertObject(concurrencyManagerCreationDate, String.class).toString(), @@ -474,6 +617,9 @@ public void dumpConcurrencyManagerInformationIfAppropriate() { Set setThreadWaitingToReleaseDeferredLocksOriginal = ConcurrencyManager.getThreadsWaitingToReleaseDeferredLocks(); Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone = ConcurrencyManager.getThreadsWaitingToReleaseDeferredLocksJustification(); Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal = WriteLockManager.getMapWriteLockManagerThreadToObjectIdsWithChangeSet(); + // introduced in 2023.04.27 because of: + // https://github.com/eclipse-ee4j/eclipselink/issues/2094 + Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired = AbstractSession.getThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired(); dumpConcurrencyManagerInformationStep01( deferredLockManagers, readLockManagersOriginal, @@ -484,7 +630,8 @@ public void dumpConcurrencyManagerInformationIfAppropriate() { mapThreadToWaitOnAcquireReadLockMethodNameOriginal, setThreadWaitingToReleaseDeferredLocksOriginal, mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, - mapThreadToObjectIdWithWriteLockManagerChangesOriginal); + mapThreadToObjectIdWithWriteLockManagerChangesOriginal + , mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired); } /** @@ -528,6 +675,15 @@ public void dumpConcurrencyManagerInformationIfAppropriate() { * a thread that has done too many changes and is creating a bigger risk fo dead lock. The more resources * an individual thread tries to grab the worse it is for the concurrency layer. The size of the change * set can be interesting. 
+ * + * @param mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired + * This parameter relates to the + * {@link org.eclipse.persistence.internal.sessions.AbstractSession#THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED} + * and to the bug https://github.com/eclipse-ee4j/eclipselink/issues/2094 it allows us to keep an eye on threads that are at post-commit phase + * and are trying to merge their change set into the original objects in the cache. + * When this is taking place some of the cache keys that the merge manager is needing might be locked by other threads. + * This can lead to deadlocks, if our merge manager thread happens the be the owner of cache keys that matter + * to the owner of the cache keys the merge manager will need to acquire. */ protected void dumpConcurrencyManagerInformationStep01(Map deferredLockManagers, Map readLockManagersOriginal, @@ -538,7 +694,8 @@ protected void dumpConcurrencyManagerInformationStep01(Map mapThreadToWaitOnAcquireReadLockMethodNameOriginal, Set setThreadWaitingToReleaseDeferredLocksOriginal, Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, - Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal) { + Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal, + Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired ) { // (a) create object to represent our cache state. ConcurrencyManagerState concurrencyManagerState = createConcurrencyManagerState( deferredLockManagers, @@ -550,7 +707,8 @@ protected void dumpConcurrencyManagerInformationStep01(Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired) { + + + // (a) Create a header string of information + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_page_header" + , new Object[] {mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired.size()})); + int currentThreadNumber = 0; + for (Entry currentEntry : mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired + .entrySet()) { + currentThreadNumber++; + Thread thread = currentEntry.getKey(); + String justification = currentEntry.getValue(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_justification", + new Object[] {currentThreadNumber, thread.getName(), justification})); + } + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_page_end")); + return writer.toString(); + } + /** * Log information focusing on the different cache keys where threads have hooks on the thread. 
* + * @@ -683,7 +896,8 @@ public ConcurrencyManagerState createConcurrencyManagerState( Map mapThreadToWaitOnAcquireReadLockMethodNameOriginal, Set setThreadWaitingToReleaseDeferredLocksOriginal, Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, - Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal) { + Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal, + Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired) { // (a) As a first step we want to clone-copy the two maps // once we start working with the maps and using them to do dead lock detection // or simply print the state of the system we do not want the maps to continue changing as the threads referenced in the maps @@ -740,7 +954,8 @@ public ConcurrencyManagerState createConcurrencyManagerState( readLockManagerMapClone, deferredLockManagerMapClone, unifiedMapOfThreadsStuckTryingToAcquireWriteLock, mapThreadToWaitOnAcquireMethodNameClone, mapThreadToWaitOnAcquireReadLockClone, mapThreadToWaitOnAcquireReadLockMethodNameClone, setThreadWaitingToReleaseDeferredLocksClone, mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, - mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, mapThreadToObjectIdWithWriteLockManagerChangesClone); + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, mapThreadToObjectIdWithWriteLockManagerChangesClone + , mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired); } /** @@ -847,11 +1062,39 @@ private String createStringWithSummaryOfReadLocksAcquiredByThread(ReadLockManage int readLockNumber = 0; for (ReadLockAcquisitionMetadata currentReadLockAcquiredAndNeverReleased : readLocksAcquiredByThread) { readLockNumber++; - writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_3", new Object[] {readLockNumber, - SINGLETON.createToStringExplainingOwnedCacheKey(currentReadLockAcquiredAndNeverReleased.getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()), - ConversionManager.getDefaultManager().convertObject(currentReadLockAcquiredAndNeverReleased.getDateOfReadLockAcquisition(), String.class).toString(), - currentReadLockAcquiredAndNeverReleased.getNumberOfReadersOnCacheKeyBeforeIncrementingByOne(), - currentReadLockAcquiredAndNeverReleased.getCurrentThreadStackTraceInformationCpuTimeCostMs()})); + if(currentReadLockAcquiredAndNeverReleased == null) { + // Note: This line of code should never be executed in real life + // however if we look at the issue: + // TRK-32315: https://github.com/eclipse-ee4j/eclipselink/issues/2114 + // We can see a null pointer exception taking place at + // at org.eclipse.persistence.internal.helper.ConcurrencyUtil.createStringWithSummaryOfReadLocksAcquiredByThread(ConcurrencyUtil.java:891) ~[eclipselink_2_7_6_hacked.jar] + // This was almost certainly the case because the method readLockManager.getMapThreadToReadLockAcquisitionMetadata() + // in the past was not returning a deep copy of the map, hence while producing a massive dump + // another thread might be acquiring / releasing read locks and be manipulating the state we are looping over here + // this is no longer the case since the createStringWithSummaryOfReadLocksAcquiredByThread now returns a deep copy + // in any case we add this piece of paranoia code to reduce the probability of issues as much as possible + // Note 2: We have also further strengthened the code of the ReadLockManager itself. + // We have added more sanity checks into that code.
Most likely the NullPointerException blow-ups in the read lock manager + were the result of this looping over lists that were not thread safe + + continue; + } + + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_3", new Object[] { + // param 00 + readLockNumber, + // param 01 + SINGLETON.createToStringExplainingOwnedCacheKey(currentReadLockAcquiredAndNeverReleased.getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()), + // param 02 + ConversionManager.getDefaultManager().convertObject(currentReadLockAcquiredAndNeverReleased.getDateOfReadLockAcquisition(), String.class).toString(), + // param 03 + currentReadLockAcquiredAndNeverReleased.getNumberOfReadersOnCacheKeyBeforeIncrementingByOne(), + // param 04 + currentReadLockAcquiredAndNeverReleased.getCurrentThreadStackTraceInformationCpuTimeCostMs() + // finish write block. + })); String stackTraceInformation = currentReadLockAcquiredAndNeverReleased.getCurrentThreadStackTraceInformation(); if (stackTraceStringToStackTraceExampleNumber.containsKey(stackTraceInformation)) { // we can spare the massive dump from being any fatter we have already added a stack trace id that diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java.ORIGIN b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java.ORIGIN new file mode 100644 index 00000000000..1e93eac323e --- /dev/null +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ConcurrencyUtil.java.ORIGIN @@ -0,0 +1,1692 @@ +/* + * Copyright (c) 2021, 2024 Oracle, IBM and/or its affiliates. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v. 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0, + * or the Eclipse Distribution License v. 1.0 which is available at + * http://www.eclipse.org/org/documents/edl-v10.php.
+ * + * SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause + */ + +// Contributors: +// Oracle - initial API and implementation from Oracle TopLink +// IBM - ConcurrencyUtil call of ThreadMXBean.getThreadInfo() needs doPriv +package org.eclipse.persistence.internal.helper; + +import org.eclipse.persistence.config.SystemProperties; +import org.eclipse.persistence.internal.helper.type.CacheKeyToThreadRelationships; +import org.eclipse.persistence.internal.helper.type.ConcurrencyManagerState; +import org.eclipse.persistence.internal.helper.type.DeadLockComponent; +import org.eclipse.persistence.internal.helper.type.ReadLockAcquisitionMetadata; +import org.eclipse.persistence.internal.identitymaps.CacheKey; +import org.eclipse.persistence.internal.localization.TraceLocalization; +import org.eclipse.persistence.internal.security.PrivilegedAccessHelper; +import org.eclipse.persistence.internal.security.PrivilegedGetSystemProperty; +import org.eclipse.persistence.internal.security.PrivilegedGetThreadInfo; +import org.eclipse.persistence.logging.AbstractSessionLog; +import org.eclipse.persistence.logging.SessionLog; + +import java.io.StringWriter; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.security.AccessController; +import java.util.*; +import java.util.concurrent.atomic.AtomicLong; + +public class ConcurrencyUtil { + + public static final ConcurrencyUtil SINGLETON = new ConcurrencyUtil(); + + private static final long DEFAULT_ACQUIRE_WAIT_TIME = 0L; + private static final long DEFAULT_BUILD_OBJECT_COMPLETE_WAIT_TIME = 0L; + private static final long DEFAULT_MAX_ALLOWED_SLEEP_TIME_MS = 40000L; + private static final long DEFAULT_MAX_ALLOWED_FREQUENCY_TINY_DUMP_LOG_MESSAGE = 40000L; + private static final long DEFAULT_MAX_ALLOWED_FREQUENCY_MASSIVE_DUMP_LOG_MESSAGE = 60000L; + private static final boolean DEFAULT_INTERRUPTED_EXCEPTION_FIRED = true; + private static final boolean DEFAULT_CONCURRENCY_EXCEPTION_FIRED = true; + private static final boolean DEFAULT_TAKING_STACKTRACE_DURING_READ_LOCK_ACQUISITION = false; + public static final boolean DEFAULT_USE_SEMAPHORE_TO_SLOW_DOWN_OBJECT_BUILDING_CONCURRENCY = false; + public static final boolean DEFAULT_USE_SEMAPHORE_TO_SLOW_DOWN_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS = false; + public static final int DEFAULT_CONCURRENCY_MANAGER_OBJECT_BUILDING_NO_THREADS = 10; + public static final int DEFAULT_CONCURRENCY_MANAGER_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS_NO_THREADS = 2; + public static final long DEFAULT_CONCURRENCY_SEMAPHORE_MAX_TIME_PERMIT = 2000L; + public static final long DEFAULT_CONCURRENCY_SEMAPHORE_LOG_TIMEOUT = 10000L; + + private long acquireWaitTime = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_ACQUIRE_WAIT_TIME, DEFAULT_ACQUIRE_WAIT_TIME); + private long buildObjectCompleteWaitTime = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_BUILD_OBJECT_COMPLETE_WAIT_TIME, DEFAULT_BUILD_OBJECT_COMPLETE_WAIT_TIME); + private long maxAllowedSleepTime = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_MAX_SLEEP_TIME, DEFAULT_MAX_ALLOWED_SLEEP_TIME_MS); + private long maxAllowedFrequencyToProduceTinyDumpLogMessage = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_MAX_FREQUENCY_DUMP_TINY_MESSAGE, DEFAULT_MAX_ALLOWED_FREQUENCY_TINY_DUMP_LOG_MESSAGE); + private long maxAllowedFrequencyToProduceMassiveDumpLogMessage = getLongProperty(SystemProperties.CONCURRENCY_MANAGER_MAX_FREQUENCY_DUMP_MASSIVE_MESSAGE, 
DEFAULT_MAX_ALLOWED_FREQUENCY_MASSIVE_DUMP_LOG_MESSAGE); + private boolean allowInterruptedExceptionFired = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_INTERRUPTED_EXCEPTION, DEFAULT_INTERRUPTED_EXCEPTION_FIRED); + private boolean allowConcurrencyExceptionToBeFiredUp = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_CONCURRENCY_EXCEPTION, DEFAULT_CONCURRENCY_EXCEPTION_FIRED); + private boolean allowTakingStackTraceDuringReadLockAcquisition = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_ALLOW_STACK_TRACE_READ_LOCK, DEFAULT_TAKING_STACKTRACE_DURING_READ_LOCK_ACQUISITION); + + private boolean useSemaphoreInObjectBuilder = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_USE_SEMAPHORE_TO_SLOW_DOWN_OBJECT_BUILDING, DEFAULT_USE_SEMAPHORE_TO_SLOW_DOWN_OBJECT_BUILDING_CONCURRENCY); + private boolean useSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks = getBooleanProperty(SystemProperties.CONCURRENCY_MANAGER_USE_SEMAPHORE_TO_SLOW_DOWN_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS, DEFAULT_USE_SEMAPHORE_TO_SLOW_DOWN_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS); + private int noOfThreadsAllowedToObjectBuildInParallel = getIntProperty(SystemProperties.CONCURRENCY_MANAGER_OBJECT_BUILDING_NO_THREADS, DEFAULT_CONCURRENCY_MANAGER_OBJECT_BUILDING_NO_THREADS); + private int noOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel = getIntProperty(SystemProperties.CONCURRENCY_MANAGER_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS_NO_THREADS, DEFAULT_CONCURRENCY_MANAGER_WRITE_LOCK_MANAGER_ACQUIRE_REQUIRED_LOCKS_NO_THREADS); + private long concurrencySemaphoreMaxTimePermit = getLongProperty(SystemProperties.CONCURRENCY_SEMAPHORE_MAX_TIME_PERMIT, DEFAULT_CONCURRENCY_SEMAPHORE_MAX_TIME_PERMIT); + private long concurrencySemaphoreLogTimeout = getLongProperty(SystemProperties.CONCURRENCY_SEMAPHORE_LOG_TIMEOUT, DEFAULT_CONCURRENCY_SEMAPHORE_LOG_TIMEOUT); + + /** + * Thread local variable that allows the current thread to know when was the last time that this specific thread + * produced the "tiny dump" indicating that the thread is stuck. + */ + private final ThreadLocal threadLocalDateWhenCurrentThreadLastComplainedAboutBeingStuckInDeadLock = new ThreadLocal<>(); + + /** + * Whenever we produce a tiny dump log message we will give it a unit identifier + */ + private final AtomicLong currentTinyMessageLogDumpNumber = new AtomicLong(0); + + /** + * Whenever we produce a massive dump log message we will give it a unit identifier + */ + private final AtomicLong currentMassiveDumpMessageLogDumpNumber = new AtomicLong(0); + + private final Object dateWhenLastConcurrencyManagerStateFullDumpWasPerformedLock = new Object(); + + /** + * Whenever we decide to log a massive dump of the state of the concurrency manager we need to make this date move forward. + *

+ * This variable is telling any thread that might be considering the possibility of logging a massive dump log message, + * when a massive dump was last performed, thus allowing threads to avoid spamming too much. + * + *

+ * NOTE:
+ * Needs to be accessed in a synchronized method. + */ + private long dateWhenLastConcurrencyManagerStateFullDumpWasPerformed = 0L; + + /** + * When we are explaining where read locks were acquired, the first time we see a new stack trace we create a stack + * trace id. Then for all other read locks we just say in the massive please go have a look at stack trace xxx. + */ + private final AtomicLong stackTraceIdAtomicLong = new AtomicLong(0); + + private ConcurrencyUtil() { + } + + /** + * Throw an interrupted exception if appears that eclipse link code is taking too long to release a deferred lock. + * + * @param whileStartTimeMillis + * the start date of the while tru loop for releasing a deferred lock + * @param callerIsWillingToAllowInterruptedExceptionToBeFiredUpIfNecessary + * this flag is to allow the write lock manager to say that it is afraid of a concurrency exception being + * fire up because the thread in a dead lock might be trying to do a commit and blowing these threads up + * is most likely too dangerous and possibly the eclipselink code is not robust enough to code with such + * scenarios We do not care so much about blowing up exception during object building but during + * committing of transactions we are very afraid + * @throws InterruptedException + * we fire an interrupted exception to ensure that the code blows up and releases all of the locks it + * had. + */ + public void determineIfReleaseDeferredLockAppearsToBeDeadLocked(ConcurrencyManager concurrencyManager, + final long whileStartTimeMillis, DeferredLockManager lockManager, ReadLockManager readLockManager, + boolean callerIsWillingToAllowInterruptedExceptionToBeFiredUpIfNecessary) + throws InterruptedException { + // (a) Determine if we believe to be dealing with a dead lock + + final long maxAllowedSleepTimeMillis = ConcurrencyUtil.SINGLETON.getMaxAllowedSleepTime(); + long whileCurrentTimeMillis = System.currentTimeMillis(); + long elapsedTime = whileCurrentTimeMillis - whileStartTimeMillis; + boolean tooMuchTimeHasElapsed = tooMuchTimeHasElapsed(whileStartTimeMillis, maxAllowedSleepTimeMillis); + if (!tooMuchTimeHasElapsed) { + // this thread is not stuck for that long let us allow the code to continue waiting for the lock to be acquired + // or for the deferred locks to be considered as finished + return; + } + + // (b) We believe this is a dead lock + // before we start spamming the server log lets make sure this thread has not spammed the server log too recently + if(threadLocalDateWhenCurrentThreadLastComplainedAboutBeingStuckInDeadLock.get() == null) { + // make sure the thread local variable never returns null + threadLocalDateWhenCurrentThreadLastComplainedAboutBeingStuckInDeadLock.set(new Date(0)); + } + Date dateWhenTinyCurrentThreadBeingStuckMessageWasLastLogged = threadLocalDateWhenCurrentThreadLastComplainedAboutBeingStuckInDeadLock.get(); + final long maxAllowedFrequencyToDumpTinyMessage = getMaxAllowedFrequencyToProduceTinyDumpLogMessage(); + boolean tooMuchTimeHasElapsedSinceLastLoggingOfTinyMessage = tooMuchTimeHasElapsed(dateWhenTinyCurrentThreadBeingStuckMessageWasLastLogged.getTime(), maxAllowedFrequencyToDumpTinyMessage); + + if(!tooMuchTimeHasElapsedSinceLastLoggingOfTinyMessage) { + // this thread has recently logged a small message about the fact that it is stuck + // no point in logging another message like that for some time + // let us allow for this thread to silently continue stuck without logging anything + return ; + } + + // (c) This thread it is dead lock since the 
whileStartDate indicates a dead lock and + // this thread has been keeping silent about the problem for some time since the dateWhenTinyCurrentThreadBeingStuckMessageWasLastLogged + // indicates that quite some time has elapsed since we have last spammed the server log + // we now start by spamming into the server log a "tiny message" specific to the current thread + String tinyErrorMessage = currentThreadIsStuckForSomeTimeProduceTinyLogMessage(elapsedTime, concurrencyManager, lockManager, readLockManager); + + // (d) next step is to log into the server log the massive dump log message where we try to explaing the concrrency mangaer state + // only one thread will suceed in doing the massive dump ever 1 minute or so + // we do not want that a massive dump is log all the time + dumpConcurrencyManagerInformationIfAppropriate(); + + // (e) Finaly we need to check what the user wants us to when we decide that we are in the middle of a dead lock + // and we have dumped whatever information we could dump + // does the user want us to blow up the thread to try release acquired locks and allow other threads to move forward + // or is the user afraid that we fire up a thread interrupted exception because if the dead lock does not resolve + // production will be serously affect by aborted business process that should normally have suceeded and after N rerties + // (e.g. 3 jms MDB message retries) the process is aborted as failed making live system recovery extermelly difficult? + // the project must decide how to forward here... + // a frozen system seems for the time being the safest course of action + // because the interrupted exception might be leaving the cocurrency manager corrupted in terms f the cache keys and the readers on the cache keys + // NOTE: + // This project has reported that our blowing up of the JTA transaction + // to release the dead lock is not being 100% effective the system can still freeze forever + // And if interrupting the thread and releasing its resources is not effective + // then we are worse off. + // Best is to leave the system frozen and simply spam into the log of the server + // the current state of cache + boolean allowConcurrencyExceptionToBeFiredUp = isAllowConcurrencyExceptionToBeFiredUp(); + if (allowConcurrencyExceptionToBeFiredUp) { + // The first check if in general concurrency excpetions to resolve the dead locks can be fired is passed + // but we do one final check. The WriteLockManager is afraid of seing its thread being blown up + // so the write lock manager will be prohibiting this exception from being fired up + if (callerIsWillingToAllowInterruptedExceptionToBeFiredUpIfNecessary) { + throw new InterruptedException(tinyErrorMessage); + } + + } else { + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE,"concurrency_manager_allow_concurrency_exception_fired_up"); + } + } + + /** + * @return "eclipselink.concurrency.manager.waittime" persistence property value. + */ + public long getAcquireWaitTime() { + return this.acquireWaitTime; + } + + public void setAcquireWaitTime(long acquireWaitTime) { + this.acquireWaitTime = acquireWaitTime; + } + + /** + * @return "eclipselink.concurrency.manager.build.object.complete.waittime" persistence property value. 
+ */ + public long getBuildObjectCompleteWaitTime() { + return buildObjectCompleteWaitTime; + } + + public void setBuildObjectCompleteWaitTime(long buildObjectCompleteWaitTime) { + this.buildObjectCompleteWaitTime = buildObjectCompleteWaitTime; + } + + /** + * @return property to control how long we are willing to wait before firing up an exception + */ + public long getMaxAllowedSleepTime() { + return this.maxAllowedSleepTime; + } + + public void setMaxAllowedSleepTime(long maxAllowedSleepTime) { + this.maxAllowedSleepTime = maxAllowedSleepTime; + } + + /** + * Just like we have a massive dump log message see {@link #getMaxAllowedFrequencyToProduceMassiveDumpLogMessage()} + * we also want threads to produce "tiny" dump about the fact that they rae stuck. We want to avoid these threads + * spaming too much the server log ... once the log message is out there not much point in continuously pumping the + * same log message out over and over again. Controlling how frequently the tiny dump is important especially when + * the user configures the hacked eclipselink to not fire up a blow up exception and instead to allow eclipselink to + * remain frozen forever. + * + * @return the frequency with which we are allowed to create a tiny dump log message + */ + public long getMaxAllowedFrequencyToProduceTinyDumpLogMessage() { + return this.maxAllowedFrequencyToProduceTinyDumpLogMessage; + } + + public void setMaxAllowedFrequencyToProduceTinyDumpLogMessage(long maxAllowedFrequencyToProduceTinyDumpLogMessage) { + this.maxAllowedFrequencyToProduceTinyDumpLogMessage = maxAllowedFrequencyToProduceTinyDumpLogMessage; + } + + /** + * If the system is perceived to be frozen and not evolving any longer, we will allow that every so often (e.g. once + * a minute) the logic complaining that the thread is stuck and going nowhere logs a very big dump message where the + * FULL concurrency manager state is explained. So that we can (manually) try to understand the dead lock based on + * the dumped information + * + * See also {@link #dateWhenLastConcurrencyManagerStateFullDumpWasPerformed}. + */ + public long getMaxAllowedFrequencyToProduceMassiveDumpLogMessage() { + return this.maxAllowedFrequencyToProduceMassiveDumpLogMessage; + } + + public void setMaxAllowedFrequencyToProduceMassiveDumpLogMessage(long maxAllowedFrequencyToProduceMassiveDumpLogMessage) { + this.maxAllowedFrequencyToProduceMassiveDumpLogMessage = maxAllowedFrequencyToProduceMassiveDumpLogMessage; + } + + public boolean isAllowInterruptedExceptionFired() { + return this.allowInterruptedExceptionFired; + } + + public void setAllowInterruptedExceptionFired(boolean allowInterruptedExceptionFired) { + this.allowInterruptedExceptionFired = allowInterruptedExceptionFired; + } + + /** + * @return true if we are supposed to be firing up exception to abort the thread in a dead lock, false we are afraid + * of trying to abort the transaction and not managing to resolve the dead lock and prefer to system frozen + * and be forced into restarting it. 
+ */ + public boolean isAllowConcurrencyExceptionToBeFiredUp() { + return this.allowConcurrencyExceptionToBeFiredUp; + } + + public void setAllowConcurrencyExceptionToBeFiredUp(boolean allowConcurrencyExceptionToBeFiredUp) { + this.allowConcurrencyExceptionToBeFiredUp = allowConcurrencyExceptionToBeFiredUp; + } + + public boolean isAllowTakingStackTraceDuringReadLockAcquisition() { + return this.allowTakingStackTraceDuringReadLockAcquisition; + } + + public void setAllowTakingStackTraceDuringReadLockAcquisition(boolean allowTakingStackTraceDuringReadLockAcquisition) { + this.allowTakingStackTraceDuringReadLockAcquisition = allowTakingStackTraceDuringReadLockAcquisition; + } + + public boolean isUseSemaphoreInObjectBuilder() { + return useSemaphoreInObjectBuilder; + } + + public void setUseSemaphoreInObjectBuilder(boolean useSemaphoreInObjectBuilder) { + this.useSemaphoreInObjectBuilder = useSemaphoreInObjectBuilder; + } + + public boolean isUseSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks() { + return useSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks; + } + + public void setUseSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks(boolean useSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks) { + this.useSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks = useSemaphoreToLimitConcurrencyOnWriteLockManagerAcquireRequiredLocks; + } + + public int getNoOfThreadsAllowedToObjectBuildInParallel() { + return noOfThreadsAllowedToObjectBuildInParallel; + } + + public void setNoOfThreadsAllowedToObjectBuildInParallel(int noOfThreadsAllowedToObjectBuildInParallel) { + this.noOfThreadsAllowedToObjectBuildInParallel = noOfThreadsAllowedToObjectBuildInParallel; + } + + public int getNoOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel() { + return noOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel; + } + + public void setNoOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel(int noOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel) { + this.noOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel = noOfThreadsAllowedToDoWriteLockManagerAcquireRequiredLocksInParallel; + } + + public long getConcurrencySemaphoreMaxTimePermit() { + return concurrencySemaphoreMaxTimePermit; + } + + public void setConcurrencySemaphoreMaxTimePermit(long concurrencySemaphoreMaxTimePermit) { + this.concurrencySemaphoreMaxTimePermit = concurrencySemaphoreMaxTimePermit; + } + + public long getConcurrencySemaphoreLogTimeout() { + return concurrencySemaphoreLogTimeout; + } + + public void setConcurrencySemaphoreLogTimeout(long concurrencySemaphoreLogTimeout) { + this.concurrencySemaphoreLogTimeout = concurrencySemaphoreLogTimeout; + } + + /** + * + * @return A to string of the cache key (e.g. that we are trying to lock + */ + public String createToStringExplainingOwnedCacheKey(ConcurrencyManager concurrencyManager) { + String cacheKeyClass = concurrencyManager.getClass().getCanonicalName(); + Thread activeThreadObj = concurrencyManager.getActiveThread(); + String activeThread = activeThreadObj != null ? 
activeThreadObj.getName() : "Null"; + long concurrencyManagerId = concurrencyManager.getConcurrencyManagerId(); + Date concurrencyManagerCreationDate = concurrencyManager.getConcurrencyManagerCreationDate(); + if (concurrencyManager instanceof CacheKey) { + CacheKey cacheKey = (CacheKey) concurrencyManager; + Object primaryKey = cacheKey.getKey(); + Object cacheKeyObject = cacheKey.getObject(); + String canonicalName = cacheKeyObject != null ? cacheKeyObject.getClass().getCanonicalName() + : TraceLocalization.buildMessage("concurrency_util_owned_cache_key_null"); + return TraceLocalization.buildMessage("concurrency_util_owned_cache_key_is_cache_key", new Object[] {canonicalName, primaryKey, + cacheKeyObject, String.valueOf(System.identityHashCode(cacheKeyObject)), + cacheKeyClass, String.valueOf(System.identityHashCode(cacheKey)), + activeThread, concurrencyManager.getNumberOfReaders(), concurrencyManagerId, + ConversionManager.getDefaultManager().convertObject(concurrencyManagerCreationDate, String.class).toString(), + // metadata of number of times the cache key suffered increases in number readers + cacheKey.getTotalNumberOfKeysAcquiredForReading(), + cacheKey.getTotalNumberOfKeysReleasedForReading(), + cacheKey.getTotalNumberOfKeysReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero(), + concurrencyManager.getDepth()}); + } else { + return TraceLocalization.buildMessage("concurrency_util_owned_cache_key_is_not_cache_key", new Object[] {cacheKeyClass, concurrencyManager, activeThread, + concurrencyManagerId, ConversionManager.getDefaultManager().convertObject(concurrencyManagerCreationDate, String.class).toString(), + concurrencyManager.getTotalNumberOfKeysAcquiredForReading(), + concurrencyManager.getTotalNumberOfKeysReleasedForReading(), concurrencyManager + .getTotalNumberOfKeysReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero(), + concurrencyManager.getDepth()}); + } + } + + /** + * We have a thread that is not evolving for quite some while. This is a fairy good indication of eclipselink being + * stuck in a dead lock. So we log some information about the thread that is stuck. + * + * @param elapsedTime + * how many ms have passed since the thread stopped moving + * @param concurrencyManager + * the current cache key that the thread is trying to acquire or the object where the thread is waiting + * for the release deferred locks . + * @param lockManager + * the lock manager + * @param readLockManager + * the read lock manager + * @return Return the string with the tiny message we logged on the server log. 
This message can be interesting if + * we decide to fire up an interrupted exception + */ + protected String currentThreadIsStuckForSomeTimeProduceTinyLogMessage(long elapsedTime, ConcurrencyManager concurrencyManager, DeferredLockManager lockManager, ReadLockManager readLockManager) { + // We believe this is a dead lock so now we will log some information + Thread currentThread = Thread.currentThread(); + String threadName = currentThread.getName(); + String currentCacheKeyContext = createToStringExplainingOwnedCacheKey(concurrencyManager); + StringWriter errorMessage = new StringWriter(); + long messageNumber = currentTinyMessageLogDumpNumber.incrementAndGet(); + + // (i) Create a big head to explain the cache key we were in when we blow up + errorMessage.write(TraceLocalization.buildMessage("concurrency_util_header_current_cache_key", new Object[] {threadName})); + // explain the cache key itself where the problem is taking place + errorMessage.write(TraceLocalization.buildMessage("concurrency_util_stuck_thread_tiny_log_cache_key", new Object[] { messageNumber, threadName, currentCacheKeyContext, elapsedTime })); + + // (ii) Add information about the cache keys where the current thread was set as actively owning + errorMessage.write(createStringWithSummaryOfActiveLocksOnThread(lockManager, threadName)); + + // (iii) Now very interesting as well are all of the objects that current thread could not acquire the + // deferred locks are essential + errorMessage.write(createStringWithSummaryOfDeferredLocksOnThread(lockManager, threadName)); + + // (iv) Add information about all cache keys te current thread acquired with READ permission + errorMessage.write(createStringWithSummaryOfReadLocksAcquiredByThread(readLockManager, threadName)); + + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE, errorMessage.toString(), new Object[] {}, false); + threadLocalDateWhenCurrentThreadLastComplainedAboutBeingStuckInDeadLock.set(new Date()); + return errorMessage.toString(); + } + + public boolean tooMuchTimeHasElapsed(final long whileStartTimeMillis, final long maxAllowedSleepTimeMs) { + if (maxAllowedSleepTimeMs == 0L) { + return false; + } + long elapsedTime = System.currentTimeMillis() - whileStartTimeMillis; + return elapsedTime > maxAllowedSleepTimeMs; + } + + /** + * Invoke the {@link #dumpConcurrencyManagerInformationStep01(Map, Map, Map, Map, Map, Map, Map, Set, Map, Map)} if sufficient time has passed. + * This log message will potentially create a massive dump in the server log file. So we need to check when was the + * last time that the masive dump was produced and decide if we can log again the state of the concurrency manager. + * + * The access to dateWhenLastConcurrencyManagerStateFullDumpWasPerformedLock is synchronized, because we do not want + * two threads in parallel to star deciding to dump the complete state of the concurrency manager at the same time. + * Only one thread should succeed in producing the massive dump in a given time window. 
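A hypothetical JUnit 4 check of the tooMuchTimeHasElapsed(...) contract defined above, assuming ConcurrencyUtil.SINGLETON is reachable from test code: a threshold of 0 disables the check, otherwise the elapsed wall-clock time is compared against the threshold.

import org.eclipse.persistence.internal.helper.ConcurrencyUtil;
import org.junit.Assert;
import org.junit.Test;

public class TooMuchTimeHasElapsedTest {

    @Test
    public void zeroThresholdDisablesTheCheck() {
        long startedLongAgo = System.currentTimeMillis() - 60_000L;
        // a max allowed sleep time of 0 means "never complain"
        Assert.assertFalse(ConcurrencyUtil.SINGLETON.tooMuchTimeHasElapsed(startedLongAgo, 0L));
    }

    @Test
    public void elapsedTimeBeyondThresholdIsDetected() {
        long startedLongAgo = System.currentTimeMillis() - 60_000L;
        // one minute of waiting exceeds a one second threshold
        Assert.assertTrue(ConcurrencyUtil.SINGLETON.tooMuchTimeHasElapsed(startedLongAgo, 1_000L));
        // a loop that has just started is still within a 30 second threshold
        Assert.assertFalse(ConcurrencyUtil.SINGLETON.tooMuchTimeHasElapsed(System.currentTimeMillis(), 30_000L));
    }
}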
+ * + */ + public void dumpConcurrencyManagerInformationIfAppropriate() { + // We do not want create a big synchronized method that would be dangerous + // but we want to make sure accessing the dateWhenLastConcurrencyManagerStateFullDumpWasPerformedLock is only + // done + // by cone thread at a time + synchronized (dateWhenLastConcurrencyManagerStateFullDumpWasPerformedLock) { + final long maxAllowedFrequencyToProduceMassiveDumpLogMessage = getMaxAllowedFrequencyToProduceMassiveDumpLogMessage(); + boolean tooMuchTimeHasElapsedSinceLastLoggingOfMassiveMessage = tooMuchTimeHasElapsed( + dateWhenLastConcurrencyManagerStateFullDumpWasPerformed, + maxAllowedFrequencyToProduceMassiveDumpLogMessage); + if (!tooMuchTimeHasElapsedSinceLastLoggingOfMassiveMessage) { + // before we can fire to the serverlog such a gigantic message + // we need to allow for more time to pass (e.g. once a minute should be fine) + // it is not like production will be waiting for half an hour for a fozen system to magically + // start working if we do 30 dumps in a half an hour ... it is really irrelevant + return; + } + + // we should proceed with making the big log dump - update the date of when the big dump was last done + dateWhenLastConcurrencyManagerStateFullDumpWasPerformed = System.currentTimeMillis(); + } + + // do the "MassiveDump" logging if enough time has passed since the previous massive dump logging + Map deferredLockManagers = ConcurrencyManager.getDeferredLockManagers(); + Map readLockManagersOriginal = ConcurrencyManager.getReadLockManagers(); + Map mapThreadToWaitOnAcquireOriginal = ConcurrencyManager.getThreadsToWaitOnAcquire(); + Map mapThreadToWaitOnAcquireMethodNameOriginal = ConcurrencyManager.getThreadsToWaitOnAcquireMethodName(); + Map mapThreadToWaitOnAcquireReadLockOriginal = ConcurrencyManager.getThreadsToWaitOnAcquireReadLock(); + Map mapThreadToWaitOnAcquireReadLockMethodNameOriginal = ConcurrencyManager.getThreadsToWaitOnAcquireReadLockMethodName(); + Map> mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal = WriteLockManager.getThreadToFailToAcquireCacheKeys(); + Set setThreadWaitingToReleaseDeferredLocksOriginal = ConcurrencyManager.getThreadsWaitingToReleaseDeferredLocks(); + Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone = ConcurrencyManager.getThreadsWaitingToReleaseDeferredLocksJustification(); + Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal = WriteLockManager.getMapWriteLockManagerThreadToObjectIdsWithChangeSet(); + dumpConcurrencyManagerInformationStep01( + deferredLockManagers, + readLockManagersOriginal, + mapThreadToWaitOnAcquireOriginal, + mapThreadToWaitOnAcquireMethodNameOriginal, + mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal, + mapThreadToWaitOnAcquireReadLockOriginal, + mapThreadToWaitOnAcquireReadLockMethodNameOriginal, + setThreadWaitingToReleaseDeferredLocksOriginal, + mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, + mapThreadToObjectIdWithWriteLockManagerChangesOriginal); + } + + /** + * The current working thread is having problems. It seems to not go forward being stuck either trying to acquire a + * cache key for writing, as a deferred cache key or it is at the end of the process and it is waiting for some + * other thread to finish building some objects it needed to defer. + * + * Now that the system is frozen we want to start spamming into the server log file the state of the concurrency + * manager since this might help us understand the situation of the system. 
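A stand-alone sketch of the throttling pattern dumpConcurrencyManagerInformationIfAppropriate() uses (the class and field names here are illustrative, not from the patch): threads synchronize only on the timestamp check, and the first one inside a given window claims it before doing the expensive logging outside the lock.

import java.util.concurrent.TimeUnit;

public class ThrottledDumpExample {
    private final Object lastDumpLock = new Object();   // guards lastDumpMillis only
    private long lastDumpMillis = 0L;                    // when the last big dump happened
    private final long minIntervalMillis = TimeUnit.MINUTES.toMillis(1);

    public void dumpIfAppropriate() {
        synchronized (lastDumpLock) {
            long now = System.currentTimeMillis();
            if (now - lastDumpMillis <= minIntervalMillis) {
                return;                                  // another thread dumped recently
            }
            lastDumpMillis = now;                        // claim this window
        }
        // expensive state collection and logging happens outside the lock
        System.err.println("massive concurrency state dump would be produced here");
    }
}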
+ * + * + * @param deferredLockManagers + * static map coming from the concurrency manager telling us all the threds and their defferred locks and + * active locks + * @param readLockManagersOriginal + * static map coming from the concurrency manager telling us all the threads and their read locks + * @param mapThreadToWaitOnAcquireOriginal + * static map of threads that have registered themselves as waiting for some cache key + * + * @param mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal + * this map relates to the fact that the write lock manager during transaction commits is very illusive. + * The write lock manger is not allowing itself to get stuck on acquiring any cache key. It uses waits + * with timings and therefore the locks needed to write and that cannot be obtained are not appearing + * inside our tracebility maps of the concurrency manager. We needed add the + * {@link org.eclipse.persistence.internal.helper.WriteLockManager#THREAD_TO_FAIL_TO_ACQUIRE_CACHE_KEYS} + * but semantically this map is 100 percent the same thing as the mapThreadToWaitOnAcquireOriginal. It + * still represents a thread wanting to grab a write lock and not managing to get it. Being stuck in that + * step. Wo we will want to fuse together the (mapThreadToWaitOnAcquireOriginal and the + * mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal) to make our lives much easier. + * + * + * @param setThreadWaitingToReleaseDeferredLocksOriginal + * static map of threads that have stopped going deeped in the recursion of object building and are + * waiting for the confirmation that some of the objects they needed to build are finished building. + * + * @param mapThreadToObjectIdWithWriteLockManagerChangesOriginal + * The write lock manager has been tweaked to store information about objects ids that the current thread + * has in its hands and that will required for write locks to be acquired by a committing thread. This + * information is especially interesting if any thread participating in a dead lock is getting stuck in + * the acquisition of write locks as part of the commit process. This information might end up revealing + * a thread that has done too many changes and is creating a bigger risk fo dead lock. The more resources + * an individual thread tries to grab the worse it is for the concurrency layer. The size of the change + * set can be interesting. + */ + protected void dumpConcurrencyManagerInformationStep01(Map deferredLockManagers, + Map readLockManagersOriginal, + Map mapThreadToWaitOnAcquireOriginal, + Map mapThreadToWaitOnAcquireMethodNameOriginal, + Map> mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal, + Map mapThreadToWaitOnAcquireReadLockOriginal, + Map mapThreadToWaitOnAcquireReadLockMethodNameOriginal, + Set setThreadWaitingToReleaseDeferredLocksOriginal, + Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, + Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal) { + // (a) create object to represent our cache state. 
+ ConcurrencyManagerState concurrencyManagerState = createConcurrencyManagerState( + deferredLockManagers, + readLockManagersOriginal, + mapThreadToWaitOnAcquireOriginal, + mapThreadToWaitOnAcquireMethodNameOriginal, + mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal, + mapThreadToWaitOnAcquireReadLockOriginal, + mapThreadToWaitOnAcquireReadLockMethodNameOriginal, + setThreadWaitingToReleaseDeferredLocksOriginal, + mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, + mapThreadToObjectIdWithWriteLockManagerChangesOriginal); + dumpConcurrencyManagerInformationStep02(concurrencyManagerState); + } + + /** + * Dump the server log all of the information that we managed to aggregate about the current state of the + * concurrency manager. + * + * @param concurrencyManagerState a snapshot of the current state of the concurrency manager and threads accessing locks. + */ + protected void dumpConcurrencyManagerInformationStep02(ConcurrencyManagerState concurrencyManagerState) { + StringWriter writer = new StringWriter(); + long messageNumber = currentMassiveDumpMessageLogDumpNumber.incrementAndGet(); + + writer.write(TraceLocalization.buildMessage("concurrency_util_dump_concurrency_manager_information_step02_01", new Object[] {messageNumber})); + // (a) Log information about the current threads in the system and there stack traces + // PAGE 01 of logging information + writer.write(createInformationThreadDump()); + // (b) log information about the threads that are waiting to acquire WRITE/DEFERRED locks + // PAGE 02 of logging information + writer.write(createInformationAboutAllThreadsWaitingToAcquireCacheKeys(concurrencyManagerState.getUnifiedMapOfThreadsStuckTryingToAcquireWriteLock(), + concurrencyManagerState.getUnifiedMapOfThreadsStuckTryingToAcquireWriteLockMethodName())); + // (c) log information about the threads that are waiting to acquire READ locks + // PAGE 03 of logging information + writer.write(createInformationAboutAllThreadsWaitingToAcquireReadCacheKeys(concurrencyManagerState.getMapThreadToWaitOnAcquireReadLockClone(), + concurrencyManagerState.getMapThreadToWaitOnAcquireReadLockCloneMethodName())); + // (c) An interesting summary of information as well is to tell the user about the threads + // that have finished their part of object building and now would like for othe threads to finish the object + // building of locks they had to defer + // PAGE 04 of logging information + writer.write(createInformationAboutAllThreadsWaitingToReleaseDeferredLocks(concurrencyManagerState.getSetThreadWaitingToReleaseDeferredLocksClone())); + // (d) Now we log information from the prespective of a THREAD to resources it has acquired and those + // it needed to defer + // PAGE 05 of logging information + writer.write(createInformationAboutAllResourcesAcquiredAndDeferredByAllThreads(concurrencyManagerState)); + // (e) Dump information by going from cache key to the threads with some sort of relationship to the key + // PAGE 06 of logging information + writer.write(createInformationAboutCacheKeysAndThreadsMakingUseOfTheCacheKey( + concurrencyManagerState.getMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey())); + // (f) Try to explain the reason for the dead lock: + // PAGE 07: we try to find out the reason for the dead lock + // but based on what we have seen so far it is mostly due to cache key corruption + // with the number of readers increased + String deadLockExplanation = dumpDeadLockExplanationIfPossible(concurrencyManagerState); + 
writer.write(deadLockExplanation); + // (g) Final header + writer.write(TraceLocalization.buildMessage("concurrency_util_dump_concurrency_manager_information_step02_02", new Object[] {messageNumber})); + // there should be no risk that the string is simply to big. the max string size in java is 2pow31 chars + // which means 2 GB string... we can be fairly confident we are not logging 2 GB in a single message. + // not even in the largest of sites... + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE, writer.toString(), new Object[] {}, false); + } + + /** + * Log information focusing on the different cache keys where threads have hooks on the thread. + * + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * a map that we have constructed where the map keys are cache keys that are of some sort of interest to + * one or more threads (e.g. cache keys with a read lock, acquired or deferred) + * + */ + private String createInformationAboutCacheKeysAndThreadsMakingUseOfTheCacheKey( + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey) { + // (a) Create a header string of information + StringWriter writer = new StringWriter(); + int numberOfCacheKeysGettingDescribed = mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey.size(); + writer.write(TraceLocalization.buildMessage("concurrency_util_cache_keys_threads_making_use_cache_key_01", new Object[] {numberOfCacheKeysGettingDescribed})); + int currentCacheKeyNumber = 0; + for(Map.Entry currentEntry : mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey.entrySet()) { + currentCacheKeyNumber++; + // (b) put a clear separator from the line above + writer.write(TraceLocalization.buildMessage("concurrency_util_cache_keys_threads_making_use_cache_key_02", new Object[] {currentCacheKeyNumber, numberOfCacheKeysGettingDescribed})); + // (c) Describe the current cache key + ConcurrencyManager cacheKey = currentEntry.getKey(); + String cacheKeyToString = createToStringExplainingOwnedCacheKey(cacheKey); + CacheKeyToThreadRelationships dto = currentEntry.getValue(); + writer.write(TraceLocalization.buildMessage("concurrency_util_cache_keys_threads_making_use_cache_key_03", new Object[] {currentCacheKeyNumber, cacheKeyToString, + dto.getThreadNamesThatAcquiredActiveLock(), dto.getThreadNamesThatAcquiredDeferredLock(), dto.getThreadNamesThatAcquiredReadLock(), + dto.getThreadNamesKnownToBeStuckTryingToAcquireLock(), dto.getThreadNamesKnownToBeStuckTryingToAcquireLockForReading()})); + } + writer.write(TraceLocalization.buildMessage("concurrency_util_cache_keys_threads_making_use_cache_key_04")); + return writer.toString(); + } + + protected String dumpDeadLockExplanationIfPossible(ConcurrencyManagerState concurrencyManagerState) { + // (a) Step one - try to detect dead lock + final long startTimeMillis = System.currentTimeMillis(); + List deadLockExplanation = Collections.emptyList(); + long deadLockDetectionTotalExecutionTimeMs = 0l; + try { + deadLockExplanation = ExplainDeadLockUtil.SINGLETON.explainPossibleDeadLockStartRecursion(concurrencyManagerState); + } catch (Exception codeIsBuggyAndBlowingUp) { + // we are unsure if the code will actually work and help + // therefore we make sure we catch any blowup coming from here + AbstractSessionLog.getLog().logThrowable(SessionLog.SEVERE, SessionLog.CACHE, new Exception( + TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_01"), + codeIsBuggyAndBlowingUp)); + } finally { + final long endTimeMillis = System.currentTimeMillis(); + 
deadLockDetectionTotalExecutionTimeMs = endTimeMillis - startTimeMillis; + } + // (b) explain what has happened + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_02")); + if (deadLockExplanation.isEmpty()) { + writer.write(TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_03")); + } else { + // (i) Write out a summary of how many threads are involved in the deadloc + writer.write(TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_04", new Object[] {deadLockExplanation.size()})); + // (ii) Print them all out + for (int currentThreadNumber = 0; currentThreadNumber < deadLockExplanation.size(); currentThreadNumber++) { + writer.write(TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_05", new Object[] {currentThreadNumber + 1, deadLockExplanation.get(currentThreadNumber).toString()})); + } + } + // (c) return the string that tries to explain the reason for the dead lock + writer.write(TraceLocalization.buildMessage("concurrency_util_dump__dead_lock_explanation_06", new Object[] {deadLockDetectionTotalExecutionTimeMs})); + return writer.toString(); + } + + /** + * create a DTO that tries to represent the current snapshot of the concurrency manager and write lock manager cache + * state + */ + public ConcurrencyManagerState createConcurrencyManagerState( + Map deferredLockManagers, + Map readLockManagersOriginal, + Map mapThreadToWaitOnAcquireOriginal, + Map mapThreadToWaitOnAcquireMethodNameOriginal, + Map> mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal, + Map mapThreadToWaitOnAcquireReadLockOriginal, + Map mapThreadToWaitOnAcquireReadLockMethodNameOriginal, + Set setThreadWaitingToReleaseDeferredLocksOriginal, + Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, + Map> mapThreadToObjectIdWithWriteLockManagerChangesOriginal) { + // (a) As a first step we want to clone-copy the two maps + // once we start working with the maps and using them to do dead lock detection + // or simply print the state of the system we do not want the maps to continue changing as the threads referenced in the maps + // go forward with their work + Map readLockManagerMapClone = cloneReadLockManagerMap(readLockManagersOriginal); + Map deferredLockManagerMapClone = cloneDeferredLockManagerMap(deferredLockManagers); + + // NOTE: the wait on acquire write locks are tricky + // we want to fuse together the threads we are tracking waiting to acquire locks + // both the one we track in the hash map of the concurrency manager + // as well as the ones we need to track inside of the write lock manager + Map> unifiedMapOfThreadsStuckTryingToAcquireWriteLock = null; + // additional method data about the method that created the trace + Map mapThreadToWaitOnAcquireMethodNameClone = cloneMapThreadToMethodName(mapThreadToWaitOnAcquireMethodNameOriginal); + { + // information from the concurrency manager state + Map mapThreadToWaitOnAcquireClone = cloneMapThreadToWaitOnAcquire(mapThreadToWaitOnAcquireOriginal); + // info from the the write lock manager state + Map> mapThreadToWaitOnAcquireInsideWriteLockManagerClone = cloneMapThreadToWaitOnAcquireInsideWriteLockManagerOriginal( + mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal); + // merge both maps together + enrichMapThreadToWaitOnAcquireInsideWriteLockManagerClone(mapThreadToWaitOnAcquireInsideWriteLockManagerClone, mapThreadToWaitOnAcquireClone); + // update the variable we 
want to be carrying forward to be the enriched map + unifiedMapOfThreadsStuckTryingToAcquireWriteLock = mapThreadToWaitOnAcquireInsideWriteLockManagerClone; + } + Map mapThreadToWaitOnAcquireReadLockClone = cloneMapThreadToWaitOnAcquire(mapThreadToWaitOnAcquireReadLockOriginal); + Map mapThreadToWaitOnAcquireReadLockMethodNameClone = cloneMapThreadToMethodName(mapThreadToWaitOnAcquireReadLockMethodNameOriginal); + Set setThreadWaitingToReleaseDeferredLocksClone = cloneSetThreadsThatAreCurrentlyWaitingToReleaseDeferredLocks(setThreadWaitingToReleaseDeferredLocksOriginal); + Map> mapThreadToObjectIdWithWriteLockManagerChangesClone = cloneMapThreadToObjectIdWithWriteLockManagerChanges( + mapThreadToObjectIdWithWriteLockManagerChangesOriginal); + // (b) All of the above maps tell a story from the respective of the threads + // very interesting as well is to be able to go over the story of the cache keys and what threads have + // expectations for these cache keys + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey = new HashMap<>(); + // (i) pump information about the read locks + enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoAboutReadLocks( + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, readLockManagerMapClone); + + // (ii) pump information about the active and deferred locks + enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoAboutActiveAndDeferredLocks( + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, deferredLockManagerMapClone); + + // (iii) Pump information into the map about the threads that are stuck because they cannot acquire a certain + // cache key (they want to acquire the cache key for WRITING either to become active thread or to defer) + enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoThreadsStuckOnAcquire( + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, unifiedMapOfThreadsStuckTryingToAcquireWriteLock); + + // (iv) Pump information into the map about the threads that are stuck because they cannot acquire a certain + // cache key (they want to acquire the cache key for READING) + enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoThreadsStuckOnAcquireLockForReading( + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, mapThreadToWaitOnAcquireReadLockClone); + + return new ConcurrencyManagerState( + readLockManagerMapClone, deferredLockManagerMapClone, unifiedMapOfThreadsStuckTryingToAcquireWriteLock, + mapThreadToWaitOnAcquireMethodNameClone, mapThreadToWaitOnAcquireReadLockClone, mapThreadToWaitOnAcquireReadLockMethodNameClone, + setThreadWaitingToReleaseDeferredLocksClone, mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, mapThreadToObjectIdWithWriteLockManagerChangesClone); + } + + /** + * Create a print of the ACTIVE locks associated to the DeferredLockManager. Owning an active lock on a thread + * implies that the thread is allowed to do write operations in relation to the object. 
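A generic sketch (the type parameters stand in for Thread and ConcurrencyManager) of the inversion performed by the enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey... helpers: the cloned registries are keyed by thread, and the dump additionally wants the opposite view, keyed by cache key, listing every thread that has some expectation on that key.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public final class MapInversionExample {

    public static <T, K> Map<K, Set<T>> invert(Map<T, Set<K>> threadToKeys) {
        Map<K, Set<T>> keyToThreads = new HashMap<>();
        for (Map.Entry<T, Set<K>> entry : threadToKeys.entrySet()) {
            for (K key : entry.getValue()) {
                // one cache key may matter to many threads, so accumulate into a set
                keyToThreads.computeIfAbsent(key, ignored -> new HashSet<>()).add(entry.getKey());
            }
        }
        return keyToThreads;
    }
}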
+ */ + private String createStringWithSummaryOfActiveLocksOnThread(DeferredLockManager lockManager, String threadName) { + // (a) Make sure the lock manager being passed is not null + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_header_active_locks_owned_by_thread", new Object[] {threadName})); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_active_locks_on_thread_1", new Object[] {threadName})); + if (lockManager == null) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_active_locks_on_thread_2")); + return writer.toString(); + } + // (b) Try to build a string that lists all of the active locks on the thread + // Loop over all of the active locks and print them + List activeLocks = new ArrayList<>(lockManager.getActiveLocks()); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_active_locks_on_thread_3", new Object[] {activeLocks.size()})); + for (int activeLockNumber = 0; activeLockNumber < activeLocks.size(); activeLockNumber++) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_active_locks_on_thread_4", new Object[] {activeLockNumber, createToStringExplainingOwnedCacheKey(activeLocks.get(activeLockNumber))})); + } + return writer.toString(); + } + + /** + * The {@link org.eclipse.persistence.internal.helper.DeferredLockManager} contains two baskat of locks being + * managed for a thread. One are active locks (granted write permission). The other deferred locks (write access or + * read access was being held by somebody else and the thread deferred). + * + * @param lockManager + * the deferred lock manager of the current thread + * @return + */ + private String createStringWithSummaryOfDeferredLocksOnThread(DeferredLockManager lockManager, String threadName) { + // (a) Make sure the lock manager being passed is not null + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_header_deferred_locks_owned_by_thread", new Object[] {threadName})); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_deferred_locks_on_thread_1", new Object[] {threadName})); + if (lockManager == null) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_deferred_locks_on_thread_2")); + return writer.toString(); + } + // (b) Try to build a string that lists all of the active locks on the thread + // Loop over all of the deferred locks and print them + @SuppressWarnings("unchecked") + List deferredLocks = new ArrayList<>(lockManager.getDeferredLocks()); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_deferred_locks_on_thread_3", new Object[] {deferredLocks.size()})); + for (int deferredLockNumber = 0; deferredLockNumber < deferredLocks.size(); deferredLockNumber++) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_deferred_locks_on_thread_4", new Object[] {deferredLockNumber, createToStringExplainingOwnedCacheKey(deferredLocks.get(deferredLockNumber))})); + } + return writer.toString(); + } + + /** + * Relates to issue. We are convinced that a read lock manager is needed for two reasons: implementing a + * dead lock detection algorithm which are currently not doing. And also beause when the code does not go according + * to happy path and do encounter a dead lock and forced to interrupt the thread, we need to force the thread to + * release any acquired a read locks it may have. 
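A greatly simplified illustration of the bookkeeping argued for above (this is not the patch's ReadLockManager): if acquiring a read lock only bumps an anonymous reader counter, nothing can be rolled back after an interrupt, so each thread additionally records which cache keys it incremented.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.Consumer;

public class PerThreadReadLockLedgerExample {
    private final ThreadLocal<Deque<Object>> acquired = ThreadLocal.withInitial(ArrayDeque::new);

    public void recordReadLock(Object cacheKey) {
        acquired.get().push(cacheKey);           // remember what this thread incremented
    }

    public void releaseAllForCurrentThread(Consumer<Object> decrement) {
        Deque<Object> keys = acquired.get();
        while (!keys.isEmpty()) {
            decrement.accept(keys.pop());        // undo every increment this thread still holds
        }
    }
}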
+ * + * @param readLockManager + * this is hacky class we created to close a gap in eclipselink code whereby read access on cache keys is + * not tracked. The only thing that happens is incrementing the nuber of readers but that is not + * sufficient if we need to abort all read locks. + * @param threadName + * the thread for which we are logging the read locks acquired + * @return A big string summarizing all of the read locks the thread. + */ + private String createStringWithSummaryOfReadLocksAcquiredByThread(ReadLockManager readLockManager, String threadName) { + // (a) Make sure the lock manager being passed is not null + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_header_reader_locks_owned_by_thread", new Object[] {threadName})); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step001_1", new Object[] {threadName})); + if (readLockManager == null) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step001_2")); + return writer.toString(); + } + // (b) Try to build a string that lists all of the acitve locks on the thread + // Loop over al of the active locks and print them + @SuppressWarnings("unchecked") + List readLocks = readLockManager.getReadLocks(); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step001_3", new Object[] {readLocks.size()})); + for (int readLockNumber = 0; readLockNumber < readLocks.size(); readLockNumber++) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step001_4", new Object[] {readLockNumber + 1, createToStringExplainingOwnedCacheKey(readLocks.get(readLockNumber))})); + } + // (c) This is the main point of candidate 007 - having a lot fatter information about when and where the read + // locks were acquired + // (i) If a thread has 48 read locks most likely it acquired all 48 read locks in the exact same code area + // so we want to avoid dumping 48 stack traces to the massive dump that would be completely out of control + // we create a map of strings in order to know if we can refer to any previously created stack trace + Map stackTraceStringToStackTraceExampleNumber = new HashMap<>(); + // (ii) Let us start dumping a mini header to give indication we now will sow very fact information about the + // read locks acquired by a thread + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_1")); + // (iii) let us organize the iformation we are about to dump by the creation date of the records in the map + Map> mapThreadToReadLockAcquisitionMetadata = readLockManager.getMapThreadToReadLockAcquisitionMetadata(); + List sortedThreadIds = new ArrayList<>(mapThreadToReadLockAcquisitionMetadata.keySet()); + Collections.sort(sortedThreadIds); + // (iv) commence the loop of dumping trace data LOOP OVER EACH JPA TRANSACTION ID + for (Long currentThreadId : sortedThreadIds) { + List readLocksAcquiredByThread = mapThreadToReadLockAcquisitionMetadata.get(currentThreadId); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_2", new Object[] {threadName, currentThreadId, readLocksAcquiredByThread.size()})); + // LOOP OVER EACH CACHE KEY ACQUIRED FORE READING BUT NEVER RELEASED FOR CURRENT THREAD ID + int readLockNumber = 0; + for (ReadLockAcquisitionMetadata currentReadLockAcquiredAndNeverReleased : readLocksAcquiredByThread) { + 
readLockNumber++; + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_3", new Object[] {readLockNumber, + SINGLETON.createToStringExplainingOwnedCacheKey(currentReadLockAcquiredAndNeverReleased.getCacheKeyWhoseNumberOfReadersThreadIsIncrementing()), + ConversionManager.getDefaultManager().convertObject(currentReadLockAcquiredAndNeverReleased.getDateOfReadLockAcquisition(), String.class).toString(), + currentReadLockAcquiredAndNeverReleased.getNumberOfReadersOnCacheKeyBeforeIncrementingByOne(), + currentReadLockAcquiredAndNeverReleased.getCurrentThreadStackTraceInformationCpuTimeCostMs()})); + String stackTraceInformation = currentReadLockAcquiredAndNeverReleased.getCurrentThreadStackTraceInformation(); + if (stackTraceStringToStackTraceExampleNumber.containsKey(stackTraceInformation)) { + // we can spare the massive dump from being any fatter we have alreayd added a stack trace id that + // is identical to the stack trace were were about dump + // we just refer to the stack trace id. + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_4", new Object[] {readLockNumber, stackTraceStringToStackTraceExampleNumber.get(stackTraceInformation)})); + } else { + // Since we have not see this stack trace pattern for this thread yet we will dump the stack trace + // into the massive dump giving it a new global id + long stackTraceId = stackTraceIdAtomicLong.incrementAndGet(); + stackTraceStringToStackTraceExampleNumber.put(stackTraceInformation, stackTraceId); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_5", new Object[] {readLockNumber, stackTraceId, stackTraceInformation})); + } + writer.write("\n\n"); + } + } + + // (d) We have some more information to pump out namely errors we have traced each time the number of readers was decremented + writer.write("\n\n"); + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_6", new Object[] {threadName, readLockManager.getRemoveReadLockProblemsDetected().size()})); + for (int releaseReadLockProblemNumber = 0; releaseReadLockProblemNumber < readLockManager.getRemoveReadLockProblemsDetected().size(); releaseReadLockProblemNumber++) { + writer.write(TraceLocalization.buildMessage("concurrency_util_summary_read_locks_on_thread_step002_7", new Object[] {releaseReadLockProblemNumber + 1, readLockManager.getRemoveReadLockProblemsDetected().get(releaseReadLockProblemNumber)})); + } + writer.write("\n\n"); + return writer.toString(); + } + + /** + * This helper API is created due to the problem of the corruption of the eclipselink cache. The idea is to have a + * tool that allows us to know specifically where the current thread was located when it acquired a READ LOCK. + * + *
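A stand-alone sketch of the stack-trace de-duplication used in the loop above: within one report, an identical trace is printed in full only the first time, receives an id from an AtomicLong, and later occurrences merely reference that id.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class StackTraceDeduplicationExample {
    // one instance per report; the describe(...) loop is meant to run on a single thread
    private final AtomicLong nextStackTraceId = new AtomicLong(0);
    private final Map<String, Long> traceToId = new HashMap<>();

    public String describe(String stackTraceText) {
        Long existingId = traceToId.get(stackTraceText);
        if (existingId != null) {
            return "see stack trace id " + existingId;           // already dumped once
        }
        long id = nextStackTraceId.incrementAndGet();
        traceToId.put(stackTraceText, id);
        return "stack trace id " + id + ":\n" + stackTraceText;   // first occurrence, dump in full
    }
}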

+     * Cache corruption problem:
+     * namely the fact that when dead locks are observed, some of the threads that seem to be the primary
+     * culprits of the dead lock are actually idle doing nothing, but they have left the number of readers of the
+     * cache key corrupted (e.g. typically incremented forever).
+     *
+     * @return the stack trace of the current thread.
+     */
+    public String enrichGenerateThreadDumpForCurrentThread() {
+        final Thread currentThread = Thread.currentThread();
+        final long currentThreadId = currentThread.getId();
+
+        try {
+            // (a) search for the stack trace of the current thread
+            final StringWriter writer = new StringWriter();
+            ThreadInfo[] threadInfos = null;
+            if (PrivilegedAccessHelper.shouldUsePrivilegedAccess()) {
+                threadInfos = AccessController.doPrivileged(new PrivilegedGetThreadInfo(new long[] { currentThreadId }, 700));
+            } else {
+                final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
+                threadInfos = threadMXBean.getThreadInfo(new long[] { currentThreadId }, 700);
+            }
+
+            for (ThreadInfo threadInfo : threadInfos) {
+                enrichGenerateThreadDumpForThreadInfo(writer, threadInfo);
+            }
+            return writer.toString();
+        } catch (Exception failToAcquireThreadDumpProgrammatically) {
+            AbstractSessionLog.getLog().logThrowable(SessionLog.SEVERE, SessionLog.CACHE, failToAcquireThreadDumpProgrammatically);
+            return TraceLocalization.buildMessage("concurrency_util_enrich_thread_dump", new Object[] {failToAcquireThreadDumpProgrammatically.getMessage()});
+        }
+    }
+
+    /**
+     * Helper tool, adapted from a publicly available example, to generate thread dumps programmatically
+     * when the event takes place.
+     *

+ * NOTE: This approach can be easily tested in a basic unit test. + * + * + * Original source of code + * + */ + private String enrichGenerateThreadDump() { + try { + final StringWriter writer = new StringWriter(); + + ThreadInfo[] threadInfos = null; + if (PrivilegedAccessHelper.shouldUsePrivilegedAccess()) { + threadInfos = AccessController.doPrivileged(new PrivilegedGetThreadInfo(700)); + } else { + final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); + threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 700); + } + + for (ThreadInfo threadInfo : threadInfos) { + enrichGenerateThreadDumpForThreadInfo(writer, threadInfo); + } + return writer.toString(); + } catch (Exception failToAcquireThreadDumpProgrammatically) { + AbstractSessionLog.getLog().logThrowable(SessionLog.SEVERE, SessionLog.CACHE, failToAcquireThreadDumpProgrammatically); + return TraceLocalization.buildMessage("concurrency_util_enrich_thread_dump", new Object[] {failToAcquireThreadDumpProgrammatically.getMessage()}); + } + } + + /** + * Enrich a given string building with the the thread writer for a given thread info object. + */ + private void enrichGenerateThreadDumpForThreadInfo(StringWriter writer, ThreadInfo threadInfo) { + writer.write(TraceLocalization.buildMessage("concurrency_util_enrich_thread_dump_thread_info_1", new Object[] {threadInfo.getThreadName(), threadInfo.getThreadState()})); + final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace(); + for (final StackTraceElement stackTraceElement : stackTraceElements) { + writer.write(TraceLocalization.buildMessage("concurrency_util_enrich_thread_dump_thread_info_2", new Object[] {stackTraceElement})); + } + writer.write("\n\n"); + } + + /** + * Write a severe log message with the current thread dump. + */ + private String createInformationThreadDump() { + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_thread_dump", new Object[] {enrichGenerateThreadDump()})); + return writer.toString(); + } + + /** + * In this page of log dumping information we want to give a summary to the user of threads that appear to be stuck + * doing an acquire of the cache key. + * + * @param unifiedMapOfThreadsStuckTryingToAcquireWriteLock + * this a cloned map that has an association between thread and cache keys the thread would like to + * acquire but cannot because there are readers on the cache key. The thread might be stuck either on the + * concurrency manager or on the write lock manager. + * @param mapThreadToWaitOnAcquireMethodNameClone + * the name of the method that updated the + * {@link org.eclipse.persistence.internal.helper.ConcurrencyManager#THREADS_TO_WAIT_ON_ACQUIRE} If we + * do not know the method name that created the trace then it must have been the + * {@link org.eclipse.persistence.internal.helper.WriteLockManager#addCacheKeyToMapWriteLockManagerToCacheKeysThatCouldNotBeAcquired(Thread, ConcurrencyManager, long)} + * . This is not obvious but essentially we trace the acquisition of write locks in to places. The first + * is the map already mentioned in the concurrency manager. The second is the map + * {@link org.eclipse.persistence.internal.helper.WriteLockManager#THREAD_TO_FAIL_TO_ACQUIRE_CACHE_KEYS} + * for the purose of the massive dump we act as if there was a single unified map. However when the + * MAP_THREAD_TO_WAIT_ON_ACQUIRE we not only add to this map the cache key we cannot acquire but also the + * method name. 
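A minimal, self-contained version of the ThreadMXBean usage shown in enrichGenerateThreadDump() above (without the PrivilegedAction branch), capping the stack depth at 700 as the patch does:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public final class ThreadDumpExample {

    public static String dumpAllThreads() {
        ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
        StringBuilder out = new StringBuilder();
        for (ThreadInfo info : threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 700)) {
            if (info == null) {
                continue; // thread died between getAllThreadIds() and getThreadInfo()
            }
            out.append('"').append(info.getThreadName()).append("\" ").append(info.getThreadState()).append('\n');
            for (StackTraceElement frame : info.getStackTrace()) {
                out.append("    at ").append(frame).append('\n');
            }
            out.append('\n');
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.print(dumpAllThreads());
    }
}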
When we work with the map the THREADS_TO_FAIL_TO_ACQUIRE_CACHE_KEYS + * we just keep trace of the cache key that could not be acquired. This + * THREADS_TO_FAIL_TO_ACQUIRE_CACHE_KEYS is currently only used in one spot so we + * can avoid the trouble of adding even more tracing for this. + */ + private String createInformationAboutAllThreadsWaitingToAcquireCacheKeys(Map> unifiedMapOfThreadsStuckTryingToAcquireWriteLock, + Map mapThreadToWaitOnAcquireMethodNameClone) { + // (a) Create a header string of information + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_cache_keys_1", new Object[] {unifiedMapOfThreadsStuckTryingToAcquireWriteLock.size()})); + int currentThreadNumber = 0; + for (Map.Entry> currentEntry : unifiedMapOfThreadsStuckTryingToAcquireWriteLock + .entrySet()) { + currentThreadNumber++; + Thread thread = currentEntry.getKey(); + Set writeLocksCurrentThreadIsTryingToAcquire = currentEntry.getValue(); + for (ConcurrencyManager cacheKey : writeLocksCurrentThreadIsTryingToAcquire) { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_cache_keys_2", new Object[] {currentThreadNumber, thread.getName(), createToStringExplainingOwnedCacheKey(cacheKey)})); + // add as well information about what method created this trace entry + // this important in case we start leaking traces when the code is configured + // to blow up + String methodNameThatGotStuckWaitingToAcquire = mapThreadToWaitOnAcquireMethodNameClone.get(currentEntry.getKey()); + if (methodNameThatGotStuckWaitingToAcquire == null) { + // this because the acquire trace was not on the + // org.eclipse.persistence.internal.helper.ConcurrencyManager.MAP_THREAD_TO_WAIT_ON_ACQUIRE + // by the concurrency manager but rather the trace of the wait on the write + // lock was created by the mapThreadToWaitOnAcquireInsideWriteLockManagerClone + // see + // org.eclipse.persistence.internal.helper.WriteLockManager.addCacheKeyToMapWriteLockManagerToCacheKeysThatCouldNotBeAcquired(Thread, ConcurrencyManager, Date) + methodNameThatGotStuckWaitingToAcquire = TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_cache_keys_3"); + } + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_cache_keys_4", new Object[] {methodNameThatGotStuckWaitingToAcquire})); + } + } + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_cache_keys_5")); + return writer.toString(); + } + + /** + * In this page of log dumping information we want to give a summary to the user of threads that appear to be stuck + * doing an acquire of the cache key. + * + * @param mapThreadToWaitOnAcquireReadLockClone + * this a cloned map that has an association between thread and cache keys the thread would like to + * acquire for READING but cannot because there is some active thread (other than themselves) holding the cache key (e.g. 
for writing) + */ + protected String createInformationAboutAllThreadsWaitingToAcquireReadCacheKeys(Map mapThreadToWaitOnAcquireReadLockClone, + Map mapThreadToWaitOnAcquireReadLockMethodNameClone) { + // (a) Create a header string of information + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_read_cache_keys_1", new Object[] {mapThreadToWaitOnAcquireReadLockClone.size()})); + + int currentThreadNumber = 0; + for(Map.Entry currentEntry : mapThreadToWaitOnAcquireReadLockClone.entrySet()) { + currentThreadNumber++; + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_read_cache_keys_2", new Object[] {currentThreadNumber, currentEntry.getKey().getName(), createToStringExplainingOwnedCacheKey(currentEntry.getValue())})); + String methodNameThatGotStuckWaitingToAcquire = mapThreadToWaitOnAcquireReadLockMethodNameClone.get(currentEntry.getKey()); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_read_cache_keys_3", new Object[] {methodNameThatGotStuckWaitingToAcquire})); + } + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_acquire_read_cache_keys_4")); + return writer.toString(); + } + + /** + * Log information about threads not moving forward because they are waiting for the + * {@code isBuildObjectOnThreadComplete } to return true. + * + * @param setThreadWaitingToReleaseDeferredLocksClone + * threads waiting for the release deferred lock process to complete. + */ + protected String createInformationAboutAllThreadsWaitingToReleaseDeferredLocks(Set setThreadWaitingToReleaseDeferredLocksClone) { + // (a) Create a header string of information + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_release_deferred_locks_1", new Object[] {setThreadWaitingToReleaseDeferredLocksClone.size()})); + + // (b) add to the string building information about each of these threads that are stuck in the + // isBuildObjectOnThreadComplete + int currentThreadNumber = 0; + for (Thread currentEntry : setThreadWaitingToReleaseDeferredLocksClone) { + currentThreadNumber++; + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_release_deferred_locks_2", new Object[] {currentThreadNumber, currentEntry.getName()})); + } + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_threads_release_deferred_locks_3")); + return writer.toString(); + } + + /** + * Log information about all threads tracked in the concurrency manager. + * + * @param concurrencyManagerState + * and object that represents a snapshot of the current state of the concurrency manager. 
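A hypothetical, stripped-down version of the report loops above: iterate a snapshot map, number the entries, and append one line per waiting thread. The real code routes every line through TraceLocalization.buildMessage(...) with localized keys rather than the plain String.format used here.

import java.io.StringWriter;
import java.util.Map;

public final class WaitingThreadsReportExample {

    public static String report(Map<Thread, String> threadToWantedCacheKeyDescription) {
        StringWriter writer = new StringWriter();
        writer.write(String.format("Threads waiting to acquire read locks: %d%n", threadToWantedCacheKeyDescription.size()));
        int currentThreadNumber = 0;
        for (Map.Entry<Thread, String> entry : threadToWantedCacheKeyDescription.entrySet()) {
            currentThreadNumber++;
            // one numbered line per stuck thread, naming the cache key it wants
            writer.write(String.format("[%d] %s -> %s%n", currentThreadNumber, entry.getKey().getName(), entry.getValue()));
        }
        return writer.toString();
    }
}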
+ */ + protected String createInformationAboutAllResourcesAcquiredAndDeferredByAllThreads( + ConcurrencyManagerState concurrencyManagerState) { + // (a) Compile a union of all threads + Set allRelevantThreads = new HashSet<>(); + allRelevantThreads.addAll(concurrencyManagerState.getSetThreadWaitingToReleaseDeferredLocksClone()); + allRelevantThreads.addAll(concurrencyManagerState.getUnifiedMapOfThreadsStuckTryingToAcquireWriteLock().keySet()); + allRelevantThreads.addAll(concurrencyManagerState.getDeferredLockManagerMapClone().keySet()); + allRelevantThreads.addAll(concurrencyManagerState.getReadLockManagerMapClone().keySet()); + + // (b) print information about all threads + StringWriter writer = new StringWriter(); + int currentThreadNumber = 0; + final int totalNumberOfThreads = allRelevantThreads.size(); + for(Thread currentThread : allRelevantThreads) { + currentThreadNumber++; + ReadLockManager readLockManager = concurrencyManagerState.getReadLockManagerMapClone().get(currentThread); + DeferredLockManager lockManager = concurrencyManagerState.getDeferredLockManagerMapClone().get(currentThread); + String waitingToReleaseDeferredLocksJustification = concurrencyManagerState.getMapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone().get(currentThread); + Set waitingOnAcquireCacheKeys = concurrencyManagerState + .getUnifiedMapOfThreadsStuckTryingToAcquireWriteLock() + .get(currentThread); + ConcurrencyManager waitingOnAcquireReadCacheKey = concurrencyManagerState + .getMapThreadToWaitOnAcquireReadLockClone().get(currentThread); + boolean threadWaitingToReleaseDeferredLocks = concurrencyManagerState + .getSetThreadWaitingToReleaseDeferredLocksClone().contains(currentThread); + + Set writeManagerThreadPrimaryKeysWithChangesToBeMerged = concurrencyManagerState + .getMapThreadToObjectIdWithWriteLockManagerChangesClone() + .get(currentThread); + String informationAboutCurrentThread = createInformationAboutAllResourcesAcquiredAndDeferredByThread( + readLockManager, lockManager, waitingOnAcquireCacheKeys, waitingOnAcquireReadCacheKey, + threadWaitingToReleaseDeferredLocks, currentThread, currentThreadNumber, totalNumberOfThreads, + writeManagerThreadPrimaryKeysWithChangesToBeMerged, waitingToReleaseDeferredLocksJustification); + writer.write(informationAboutCurrentThread); + } + + // (c) Log on the serverlog information about all the threads being tracked in the concurrency manager + return writer.toString(); + } + + /** + * Build a string that tries to describe in as much detail as possible the resources associated to the current + * thread. + * + * @param readLockManager + * the read lock manager for the current thread + * @param lockManager + * the lock manager for the current thread + * @param waitingOnAcquireCacheKeys + * null if the current thread is not waiting to acquire a cache key otherwise the cachekey that the + * current thread wants to acquire and that is making it block. This field evolved to be a set and not + * just one cache key because when we needed to tweak the write lock manager code to report about why the + * write lock manager is stuck we need it to create the map + * {@link org.eclipse.persistence.internal.helper.WriteLockManager#THREAD_TO_FAIL_TO_ACQUIRE_CACHE_KEYS} + * whereby during a commit where entiteis are merged into the shared cache a thread might be trying to + * grab several write locks. 
So here we have a mix between the cache key a thread
+     *            currently wants from the concurrency manager and the cache keys the write lock manager is not managing to grab.
+     *
+     * @param waitingOnAcquireReadCacheKey
+     *            cache key the thread is failing to acquire in the
+     *            {@link org.eclipse.persistence.internal.helper.ConcurrencyManager#acquireReadLock()}
+     * @param threadWaitingToReleaseDeferredLocks
+     *            true if the current thread is now blocked waiting to confirm that the locks it deferred have finished
+     *            building the corresponding objects.
+     * @param thread
+     *            the thread being described
+     * @param currentThreadNumber
+     *            a loop-incremented index so the dump log messages convey which thread is currently being
+     *            described and how many more threads are still to be described
+     * @param totalNumberOfThreads
+     *            the total number of threads being described in a for loop
+     * @return a string describing the thread provided. We can see the active locks, deferred locks, read locks etc.,
+     *         as well as whether the thread is waiting to acquire a specific cache key or waiting for object building to
+     *         complete.
+     * @param writeManagerThreadPrimaryKeysWithChangesToBeMerged
+     *            Null for all threads except those that are currently about to commit and merge changes to the shared
+     *            cache. In this case it holds the primary keys of the objects that were changed by the transaction. The
+     *            write lock manager has been tweaked to store information about the object ids that the current thread has
+     *            in its hands and that will require write locks to be acquired by a committing thread. This
+     *            information is especially interesting if any thread participating in a dead lock is getting stuck in
+     *            the acquisition of write locks as part of the commit process. This information might end up revealing
+     *            a thread that has done too many changes and is creating a bigger risk of dead lock. The more resources
+     *            an individual thread tries to grab, the worse it is for the concurrency layer. The size of the change
+     *            set can be interesting.
+     * @param waitingToReleaseDeferredLocksJustification
+     *            when a thread is stuck for more than 500 ms in the release deferred locks algorithm, the concurrency
+     *            manager starts trying to justify why the method isBuildObjectComplete keeps returning false. 
This + * information is important whenever the param thread waiting to release deferred locks is true + */ + protected String createInformationAboutAllResourcesAcquiredAndDeferredByThread( + ReadLockManager readLockManager, DeferredLockManager lockManager, + Set waitingOnAcquireCacheKeys, ConcurrencyManager waitingOnAcquireReadCacheKey, + boolean threadWaitingToReleaseDeferredLocks, Thread thread, + int currentThreadNumber, int totalNumberOfThreads, + Set writeManagerThreadPrimaryKeysWithChangesToBeMerged, + String waitingToReleaseDeferredLocksJustification) { + + // (a) Build a base overview summary of the thread state + StringWriter writer = new StringWriter(); + String threadName = thread.getName(); + // (i) A base summary about the current thread + // is the thread waiting to acquire a lock or is it waiting to release deferred locks + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_1", new Object[] {currentThreadNumber, totalNumberOfThreads, thread.getName(), threadWaitingToReleaseDeferredLocks})); + // (iii) Information is this is a thread in the process of trying to acquire for writing a cache key + if (waitingOnAcquireCacheKeys != null && !waitingOnAcquireCacheKeys.isEmpty()) { + for (ConcurrencyManager waitingOnAcquireCacheKey : waitingOnAcquireCacheKeys) { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_2", new Object[] {createToStringExplainingOwnedCacheKey(waitingOnAcquireCacheKey)})); + } + } else { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_3")); + } + // (iv) Information is this is a thread in the process of trying to acquire for reading a cache key + if (waitingOnAcquireReadCacheKey != null) { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_4", new Object[] {createToStringExplainingOwnedCacheKey(waitingOnAcquireReadCacheKey)})); + } else { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_5")); + } + // (v) if the thread is stuck in the write lock manager trying to acquire all write locks to commit and merge + // changes to the shared + // cache this information might be interesting + boolean currentThreadIsTryingCommitToSharedCacheChanges = writeManagerThreadPrimaryKeysWithChangesToBeMerged != null + && !writeManagerThreadPrimaryKeysWithChangesToBeMerged.isEmpty(); + if (currentThreadIsTryingCommitToSharedCacheChanges) { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_6", new Object[] {writeManagerThreadPrimaryKeysWithChangesToBeMerged.toString()})); + } else { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_7")); + } + // Start dumping information about the deferred lock and read lock manager of this thread + // (b) Add information about the cache keys where the current thread was set as actively owning + writer.write(ConcurrencyUtil.SINGLETON.createStringWithSummaryOfActiveLocksOnThread(lockManager, threadName)); + // (c) Now very interesting as well are all of the objects that current thread could not acquire the deferred locks are essential + writer.write(createStringWithSummaryOfDeferredLocksOnThread(lockManager, threadName)); + // (d) On the topic of the defferred locks we can also try to do better and 
explain why the algorithm + // keeps returning false that the build object is not yet complete + if (waitingToReleaseDeferredLocksJustification != null && waitingToReleaseDeferredLocksJustification.length() > 0) { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_8", new Object[] {waitingToReleaseDeferredLocksJustification})); + } else { + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_9")); + } + // (e) Add information about all cache keys te current thread acquired with READ permission + writer.write(createStringWithSummaryOfReadLocksAcquiredByThread(readLockManager, threadName)); + writer.write(TraceLocalization.buildMessage("concurrency_util_create_information_all_resources_acquired_deferred_10", new Object[] {currentThreadNumber, totalNumberOfThreads})); + return writer.toString(); + } + + /** + * Clone the static map of the concurrency manager that tells us about threads waiting to acquire locks. + * + * @param mapThreadToWaitOnAcquireOriginal + * the original map we want to clone + * @return a cloned map + */ + public static Map cloneMapThreadToWaitOnAcquire(Map mapThreadToWaitOnAcquireOriginal) { + return new HashMap<>(mapThreadToWaitOnAcquireOriginal); + } + + /** + * Clone the map of the method names that tells us justification where threads acquire locks. + * + * @param mapThreadToWaitOnAcquireOriginal + * the original map we want to clone + * @return a cloned map + */ + public static Map cloneMapThreadToMethodName(Map mapThreadToWaitOnAcquireOriginal) { + return new HashMap<>(mapThreadToWaitOnAcquireOriginal); + } + + /** + * Clone the static map of the concurrency manager that tells us about threads waiting to acquire locks. + * + * @param mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal + * the original map we want to clone + * @return a cloned map + */ + public static Map> cloneMapThreadToWaitOnAcquireInsideWriteLockManagerOriginal( + Map> mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal) { + Map> result = new HashMap<>(); + // this iterator is safe because the original map is a concurrent hashmap so the iterator should not blow up + for (Map.Entry> entry : mapThreadToWaitOnAcquireInsideWriteLockManagerOriginal.entrySet()) { + Set clonedSet = new HashSet<>(entry.getValue()); + result.put(entry.getKey(), clonedSet); + } + return result; + } + + + /** + * We have two maps we are using to trace threads that are stuck acquiring locks. + * One map is found in the concurrency manager the other in the write lock manager. + * When we start dumping information we only care about working with one and only one map. + * Therefore we merge the two maps together since semantically they both mean the exact same thing: + * a thread A wants a cachekey B for writing and is not getting it. 
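To make that merge concrete, a simplified sketch of folding a "thread waits on one lock" map into a "thread waits on a set of locks" map is shown below; String and Long are hypothetical stand-ins for Thread and ConcurrencyManager, and computeIfAbsent is used instead of the explicit containsKey/put sequence of the patch:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class MergeWaitMapsSketch {

    // Copy every "thread -> lock it waits on" entry into the unified "thread -> all locks" view.
    static void mergeInto(Map<String, Set<Long>> unified, Map<String, Long> flat) {
        for (Map.Entry<String, Long> entry : flat.entrySet()) {
            unified.computeIfAbsent(entry.getKey(), thread -> new HashSet<>())
                    .add(entry.getValue());
        }
    }

    public static void main(String[] args) {
        Map<String, Set<Long>> unified = new HashMap<>();
        Map<String, Long> flat = new HashMap<>();
        flat.put("thread-1", 42L);
        mergeInto(unified, flat);
        System.out.println(unified); // prints {thread-1=[42]}
    }
}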
+ * + * @param mapThreadToWaitOnAcquireInsideWriteLockManagerClone + * this is the map we want to enrich + * @param mapThreadToWaitOnAcquireClone + * this is the map whose entries we want to copy into the map to enrich + */ + public static void enrichMapThreadToWaitOnAcquireInsideWriteLockManagerClone( + Map<Thread, Set<ConcurrencyManager>> mapThreadToWaitOnAcquireInsideWriteLockManagerClone, Map<Thread, ConcurrencyManager> mapThreadToWaitOnAcquireClone) { + // (a) Loop over each of the threads in the map mapThreadToWaitOnAcquireClone + // and add the cache keys threads are waiting for into the corresponding entry of the + // mapThreadToWaitOnAcquireInsideWriteLockManagerClone + for (Map.Entry<Thread, ConcurrencyManager> entry : mapThreadToWaitOnAcquireClone.entrySet()) { + Thread currentThread = entry.getKey(); + if(!mapThreadToWaitOnAcquireInsideWriteLockManagerClone.containsKey(currentThread)) { + mapThreadToWaitOnAcquireInsideWriteLockManagerClone.put(currentThread, new HashSet<>()); + } + Set<ConcurrencyManager> cacheKeys = mapThreadToWaitOnAcquireInsideWriteLockManagerClone.get(currentThread); + cacheKeys.add(entry.getValue()); + } + } + + /** + * A set of threads that are at the end of object building and are waiting for the deferred locks to be resolved. + * + * @param setThreadWaitingToReleaseDeferredLocksOriginal + * the original set of threads that are waiting for deferred locks to be resolved. + * @return A cloned hash set of threads waiting for their deferred locks to be resolved. + */ + public static Set<Thread> cloneSetThreadsThatAreCurrentlyWaitingToReleaseDeferredLocks( + Set<Thread> setThreadWaitingToReleaseDeferredLocksOriginal) { + return new HashSet<>(setThreadWaitingToReleaseDeferredLocksOriginal); + } + + /** + * Clone the information about threads that are in the write lock manager trying to commit and the object ids they + * are holding with some arbitrary changes. + * + * @param mapThreadToObjectIdWithWriteLockManagerChangesOriginal + * map of thread to the primary keys of objects changed by a transaction in the commit phase. This is + * the original map grabbed from the WriteLockManager. + * @return a cloned map of thread to object id primary keys that a thread committing might have changed. + */ + public static Map<Thread, Set<Object>> cloneMapThreadToObjectIdWithWriteLockManagerChanges( + Map<Thread, Set<Object>> mapThreadToObjectIdWithWriteLockManagerChangesOriginal) { + Map<Thread, Set<Object>> result = new HashMap<>(); + for (Map.Entry<Thread, Set<Object>> currentEntry : mapThreadToObjectIdWithWriteLockManagerChangesOriginal.entrySet()) { + result.put(currentEntry.getKey(), new HashSet<>(currentEntry.getValue())); + } + return result; + } + + /** + * To facilitate algorithms that want to dump a snapshot of the current state of the concurrency manager or to start + * a hunt for dead locks this API facilitates the bootstrapping logic of such algorithms by giving the algorithm a + * stable clone of the map of read locks that we know will not change throughout the time the algorithm is running. + * + * @param readLockManagersOriginal + * This is the original map of read locks referred to by the concurrency manager. This is a very bad platform + * to work with because if for whatever reason not all threads are frozen and some are actually managing + * to complete their transactions the contents of this map are systematically changing with threads being + * added in and removed. + * @return A clone of the readLockManagersOriginal. Essentially the map instance returned is new and independent and + * the values {@link ReadLockManager} are also clones and independent. The only thing that is + * not cloned here - whose state could be changing - are the cache keys themselves.
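As an aside, assuming a caller that wants to produce a dump, the intended usage of these clone helpers is roughly "snapshot first, then iterate only the snapshot". A small illustrative sketch with hypothetical names, String/List values instead of the real ReadLockManager, and a ConcurrentHashMap standing in for the live registry:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotBeforeDumpSketch {

    // Live per-thread state, constantly mutated by worker threads; a concurrent map is assumed here.
    static final Map<String, List<String>> liveReadLocksByThread = new ConcurrentHashMap<>();

    // Returns an independent deep copy that dump code can iterate without racing the workers.
    static Map<String, List<String>> snapshot() {
        Map<String, List<String>> clone = new HashMap<>();
        for (String threadName : new ArrayList<>(liveReadLocksByThread.keySet())) {
            List<String> locks = liveReadLocksByThread.get(threadName);
            if (locks != null) {
                clone.put(threadName, new ArrayList<>(locks));
            }
            // a missing value just means that thread finished and deregistered in the meantime
        }
        return clone;
    }
}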
The cache keys pointed by + * the vector {@link ReadLockManager#getReadLocks()} are the original values. So our clone + * from the read lock manager is not a perfectly stable clone. It will not be blowing up telling us + * concurrent access modification when we loop through the vector. But no one can guarnate the number of + * readers on the cache key stays the same nor that the active thread on a cache key stays the same... Those + * values can definitely be fluctuating (not ideal ... but it would be quite hard to get better than this). + */ + public Map cloneReadLockManagerMap(Map readLockManagersOriginal) { + // (a) Start by safeguarding the keys of the map we want to clone + // (e.g. avoid the risk of concurrent modification exception while looping over a keyset) + List mapKeys = new ArrayList<>(readLockManagersOriginal.keySet()); + + // (b) start the the cloning process + Map cloneResult = new HashMap<>(); + for (Thread currentKey : mapKeys) { + ReadLockManager readLockManagerOriginal = readLockManagersOriginal.get(currentKey); + if (readLockManagerOriginal != null) { + ReadLockManager readLockManagerClone = readLockManagerOriginal.clone(); + cloneResult.put(currentKey, readLockManagerClone); + } else { + // most likely the current thread has just finished its work + // and is no longer to be found in the original map + } + } + + // (c) The caller of this method can do with it whatever it wants because no one will be modifying this map + // nor the contained + return cloneResult; + } + + /** + * The exact same thing as the {@link #cloneReadLockManagerMap(Map)} but the map we are cloning here is the one of + * threads to deferred locks + * + * @param deferredLockManagersOriginal + * the original map taken from the conrruency manager itself + * @return A clone of that map that is a relatively stable data structure to work with since no new threads will + * register in or out in the map nor will the DeferredLockManager values be changing. As for the read lock + * manager we have no assurance as to what is happening with the cache keys themselves refered by the + * {@link DeferredLockManager} values, the cache keys are always changing their metadata as new threads come + * in to do work or finish doing work. So it is not a perfect snapshot of the state of the system, but it is + * as close as we can get. + */ + public Map cloneDeferredLockManagerMap(Map deferredLockManagersOriginal) { + // (a) Start by safeguarding the keys of the map we want to clone + // (e.g. 
avoid the risk of concurrent modification exception while looping over a keyset) + List mapKeys = new ArrayList<>(deferredLockManagersOriginal.keySet()); + + // (b) start the the cloning process + Map cloneResult = new HashMap<>(); + for (Thread currentKey : mapKeys) { + DeferredLockManager deferredLockManagerOriginal = deferredLockManagersOriginal.get(currentKey); + if (deferredLockManagerOriginal != null) { + DeferredLockManager deferredLockManagerClone = cloneDeferredLockManager(deferredLockManagerOriginal); + cloneResult.put(currentKey, deferredLockManagerClone); + } else { + // most likely the current thread has just finished its work + // and is no longer to be found in the original map + } + } + + // (c) The caller of this method can do with it whatever it wants because no one will be modifying this map + // nor the contained + return cloneResult; + + } + + /** + * Clone an original {@link DeferredLockManager} so that our algorithms of state dump or dead lock search can safely + * work ina stable model state that is not constantly changing. + * + * @param deferredLockManagerOriginal + * an object that is originating from the map of thread to deferred locks from the concurrency manager + * class. We do not want to be carrying around the original object while try to make a dump/snapshot of + * the current state of the concurrency manager since these objects are always mutating. Locks are being + * acquired and released etc... All the tiem. The only objest thta will be stable are those of threads + * involved ina dead lock. And those are the threads that matter the most to us anyway. + * @return a cloned deferred lock manager. The newly created deferred lock manager will have its vectors of cache + * keys holding references the same cache keys as the original object. The cache keys themselves are not + * cloned. That measn that the DeferredLockManager will be immuatable in terms of its vectors and held + * references. But the objects it refers to (e.g. cache keys) can be mutating all the time if new readers or + * active threads arrive. + */ + @SuppressWarnings("rawtypes") + public DeferredLockManager cloneDeferredLockManager(DeferredLockManager deferredLockManagerOriginal) { + // (a) Start by cloning from the original the two vectors of cache keys is administers + Vector cloneOfActiveLocks = (Vector) deferredLockManagerOriginal.getActiveLocks().clone(); + Vector cloneOfDeferredLocks = (Vector) deferredLockManagerOriginal.getDeferredLocks().clone(); + + // (b) Build our clone object + DeferredLockManager deferredLockManagerClone = new DeferredLockManager(); + deferredLockManagerClone.setIsThreadComplete(deferredLockManagerOriginal.isThreadComplete()); + + // NOTE: the DeferredLockManager follows a very bad practice + // it gives direct acess to its internal state from outside + // it gives us direct access to its referred lists + // so the internal private state of the deferredLockManager can be modified directly from the outisde + // by anybody... + // but we use the apis we have access to. + deferredLockManagerClone.getActiveLocks().addAll(cloneOfActiveLocks); + deferredLockManagerClone.getDeferredLocks().addAll(cloneOfDeferredLocks); + return deferredLockManagerClone; + } + + /** + * Enrich the mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey by setting on the cache keys the threads + * that are stuck trying to acquire the cache key. 
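The enrichment methods that follow all apply the same inversion: walk a per-thread view and register the thread against every cache key it touches. A compact sketch of that pattern with simplified types (Long ids for cache keys, String names for threads, and a plain list where the real code keeps a CacheKeyToThreadRelationships DTO):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class InvertWaitersSketch {

    // Invert "thread -> locks it is interested in" into "lock -> threads interested in it".
    static Map<Long, List<String>> invert(Map<String, List<Long>> threadToLocks) {
        Map<Long, List<String>> lockToThreads = new HashMap<>();
        for (Map.Entry<String, List<Long>> entry : threadToLocks.entrySet()) {
            for (Long lockId : entry.getValue()) {
                // get-or-create the per-lock bucket, then record the thread on it
                lockToThreads.computeIfAbsent(lockId, id -> new ArrayList<>())
                        .add(entry.getKey());
            }
        }
        return lockToThreads;
    }
}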
+ * + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * the map we are trying to enirhc with additional information + * @param unifiedMapOfThreadsStuckTryingToAcquireWriteLock + * a map telling us about threads that at a certain point in time were not progressing anywhere because + * they were waiting to acquire a write lock. These are threads either stuck on the concurrency manager + * or in the write lock manager during a transaction commmit + * + * + */ + public void enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoThreadsStuckOnAcquire( + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, + Map> unifiedMapOfThreadsStuckTryingToAcquireWriteLock) { + + // (a) Loop over each thread that registered itself as being waiting to lock a cache key + for (Map.Entry> currentEntry : unifiedMapOfThreadsStuckTryingToAcquireWriteLock + .entrySet()) { + Thread currentThread = currentEntry.getKey(); + for (ConcurrencyManager cacheKeyThreadIsWaitingToAcquire : currentEntry.getValue()) { + CacheKeyToThreadRelationships dto = get(cacheKeyThreadIsWaitingToAcquire, mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey); + dto.addThreadsKnownToBeStuckTryingToAcquireLock(currentThread); + } + } + } + + /** + * Enrich the mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey by setting on the cache keys the threads + * that are stuck trying to acquire the cache key with a read lock. These are threads stuck on the + * {@link org.eclipse.persistence.internal.helper.ConcurrencyManager#acquireReadLock()} + * + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * the map we are trying to enirhc with additional information + * @param mapThreadToWaitOnAcquireReadLockClone + * a map telling us about threads that at a certain point in time were not progressing anywhere because + * they were waiting to acquire a lock. 
+ * + * + */ + public void enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoThreadsStuckOnAcquireLockForReading( + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, + Map mapThreadToWaitOnAcquireReadLockClone) { + + // (a) Loop over each thread that registered itself as being waiting to lock a cache key + for (Map.Entry currentEntry : mapThreadToWaitOnAcquireReadLockClone.entrySet()) { + Thread currentThread = currentEntry.getKey(); + ConcurrencyManager cacheKeyThreadIsWaitingToAcquire = currentEntry.getValue(); + CacheKeyToThreadRelationships dto = get(cacheKeyThreadIsWaitingToAcquire, + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey); + dto.addThreadsKnownToBeStuckTryingToAcquireLockForReading(currentThread); + } + } + + /** + * Enrich the mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey based on the read locks + * + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * a map whose metadata we need to enrich + * @param readLockManagerMapClone + * map cloned from the original map and that gives us a snapshot of threads that acquired read locks + */ + public void enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoAboutReadLocks( + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, + Map readLockManagerMapClone) { + + // (a) Loop over each thread that is regisered as having acquired read locks + for (Map.Entry currentEntry : readLockManagerMapClone.entrySet()) { + Thread currentThread = currentEntry.getKey(); + ReadLockManager currentValue = currentEntry.getValue(); + // (b) loop over each read lock acquired by the current thread + // enrich the map of cache key to thread doing something in respect to the cache key + for (ConcurrencyManager cacheKeyAcquiredReadLock : currentValue.getReadLocks()) { + CacheKeyToThreadRelationships dto = get(cacheKeyAcquiredReadLock, + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey); + dto.addThreadsThatAcquiredReadLock(currentThread); + } + } + } + + /** + * Enrich our map map of cache key to threads having a relationship with that object in regards to active locks on + * the cache key and deferred locks on the cache key + * + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * the map we want to enrich with more information + * @param deferredLockManagerMapClone + * the cloned map with information about threads and their deferred locks. + */ + public void enrichMapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKeyInfoAboutActiveAndDeferredLocks( + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, + Map deferredLockManagerMapClone) { + + // (a) Loop over each thread that has a deferred lock manager + for (Map.Entry currentEntry : deferredLockManagerMapClone.entrySet()) { + Thread currentThread = currentEntry.getKey(); + DeferredLockManager currentValue = currentEntry.getValue(); + + // (b) First we focus on the active locks owned by the thread + // enrich the map of cache key to thread doing something in respect to the cache key + for (Object activeLockObj : currentValue.getActiveLocks()) { + ConcurrencyManager activeLock = (ConcurrencyManager) activeLockObj; + CacheKeyToThreadRelationships dto = get(activeLock, mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey); + dto.addThreadsThatAcquiredActiveLock(currentThread); + } + + // (c) Now we go over the defferred locks on this thread + // (e.g. 
object locks that it could not acquire because some other thread was active at the time owning the + // lock) + for (Object deferredLockObj : currentValue.getDeferredLocks()) { + ConcurrencyManager deferredLock = (ConcurrencyManager) deferredLockObj; + CacheKeyToThreadRelationships dto = get(deferredLock, mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey); + dto.addThreadsThatAcquiredDeferredLock(currentThread); + } + } + } + + /** + * Helper method to make sure we never get null dto from the + * mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey. + * + * @param cacheKey + * the cache key we are search for + * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey + * the map of cache key to concurrency manager locking metadata + * @return never returls null. If the cache key is not yet in the map a ney entry is returned. + */ + protected CacheKeyToThreadRelationships get(ConcurrencyManager cacheKey, + Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey) { + if (!mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey.containsKey(cacheKey)) { + mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey.put(cacheKey, + new CacheKeyToThreadRelationships(cacheKey)); + } + return mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey.get(cacheKey); + } + + + + /** + * If when we are decrement the counter of number of readers on a cache key we find ourselves lacking the read lock + * manager at the time of the decrement we want to log a big fat error on the server log protecting that the current + * thread is misbehaving. + * + * @param currentNumberOfReaders + * the current count of readers on the cache key about to be decremented + * @param decrementedNumberOfReaders + * the number of readers of the cache key if we subtract one reader + * @param cacheKey + * the cache key that is about to suffer a decrement on the number of readers + */ + public String readLockManagerProblem01CreateLogErrorMessageToIndicateThatCurrentThreadHasNullReadLockManagerWhileDecrementingNumberOfReaders( + final int currentNumberOfReaders, final int decrementedNumberOfReaders, ConcurrencyManager cacheKey) { + + Thread currentThread = Thread.currentThread(); + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_read_lock_manager_problem01", new Object[] {currentThread.getName(), currentNumberOfReaders, decrementedNumberOfReaders, + ConcurrencyUtil.SINGLETON.createToStringExplainingOwnedCacheKey(cacheKey), enrichGenerateThreadDumpForCurrentThread(), new Date()})); + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE, writer.toString(), new Object[] {}, false); + return writer.toString(); + } + + public String readLockManagerProblem02ReadLockManageHasNoEntriesForThread(ConcurrencyManager cacheKey, long threadId) { + Thread currentThread = Thread.currentThread(); + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_read_lock_manager_problem02", new Object[] {currentThread.getName(), SINGLETON.createToStringExplainingOwnedCacheKey(cacheKey), + threadId, enrichGenerateThreadDumpForCurrentThread(), ConversionManager.getDefaultManager().convertObject(new Date(), String.class).toString()})); + // We do log immediately the error as we spot it + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE, writer.toString(), new Object[] {}, false); + // we also return the error message we just logged to added it to our tracing permanently + return 
writer.toString(); + } + + public String readLockManagerProblem03ReadLockManageHasNoEntriesForThread(ConcurrencyManager cacheKey, long threadId) { + Thread currentThread = Thread.currentThread(); + StringWriter writer = new StringWriter(); + writer.write(TraceLocalization.buildMessage("concurrency_util_read_lock_manager_problem03", new Object[] {currentThread.getName(), SINGLETON.createToStringExplainingOwnedCacheKey(cacheKey), + threadId, enrichGenerateThreadDumpForCurrentThread(), ConversionManager.getDefaultManager().convertObject(new Date(), String.class).toString()})); + // We do log immediately the error as we spot it + AbstractSessionLog.getLog().log(SessionLog.SEVERE, SessionLog.CACHE, writer.toString(), new Object[] {}, false); + // we also return the error message we just logged to added it to our tracing permanently + return writer.toString(); + } + + /** + * The concurrency managers about to acquire a cache key. And since we have been suffering from cache corruption on + * the acquire read locks we need to collect a lot more information about the time of acquisition of a read lock. + * + * @param concurrencyManager + * the cache key we are about to increment and acquire for reading + * @return object that have all the context information to allow us to know when and where + * exactly this key acquisition took place. + */ + public ReadLockAcquisitionMetadata createReadLockAcquisitionMetadata(ConcurrencyManager concurrencyManager) { + final boolean isAllowTakingStackTraceDuringReadLockAcquisition = isAllowTakingStackTraceDuringReadLockAcquisition(); + String currentThreadStackTraceInformation = TraceLocalization.buildMessage("concurrency_util_read_lock_acquisition_metadata"); + long currentThreadStackTraceInformationCpuTimeCostMs = 0l; + if (isAllowTakingStackTraceDuringReadLockAcquisition) { + long startTimeMillis = System.currentTimeMillis(); + currentThreadStackTraceInformation = enrichGenerateThreadDumpForCurrentThread(); + long endTimeMillis = System.currentTimeMillis(); + currentThreadStackTraceInformationCpuTimeCostMs = endTimeMillis - startTimeMillis; + } + int numberOfReadersOnCacheKeyBeforeIncrementingByOne = concurrencyManager.getNumberOfReaders(); + // data in ReadLockAcquisitionMetadata are immutable it reflects an accurate snapshot of the time of acquisition + return new ReadLockAcquisitionMetadata(concurrencyManager, numberOfReadersOnCacheKeyBeforeIncrementingByOne, + currentThreadStackTraceInformation, currentThreadStackTraceInformationCpuTimeCostMs); + } + + private int getIntProperty(final String key, final int defaultValue) { + String value = (PrivilegedAccessHelper.shouldUsePrivilegedAccess()) ? + AccessController.doPrivileged(new PrivilegedGetSystemProperty(key, String.valueOf(defaultValue))) + : System.getProperty(key, String.valueOf(defaultValue)); + if (value != null) { + try { + return Integer.parseInt(value.trim()); + } catch (Exception ignoreE) { + return defaultValue; + } + } + return defaultValue; + } + + private long getLongProperty(final String key, final long defaultValue) { + String value = (PrivilegedAccessHelper.shouldUsePrivilegedAccess()) ? 
+ AccessController.doPrivileged(new PrivilegedGetSystemProperty(key, String.valueOf(defaultValue))) + : System.getProperty(key, String.valueOf(defaultValue)); + if (value != null) { + try { + return Long.parseLong(value.trim()); + } catch (Exception ignoreE) { + return defaultValue; + } + } + return defaultValue; + } + + private boolean getBooleanProperty(final String key, final boolean defaultValue) { + String value = (PrivilegedAccessHelper.shouldUsePrivilegedAccess()) ? + AccessController.doPrivileged(new PrivilegedGetSystemProperty(key, String.valueOf(defaultValue))) + : System.getProperty(key, String.valueOf(defaultValue)); + if (value != null) { + try { + return Boolean.parseBoolean(value.trim()); + } catch (Exception ignoreE) { + return defaultValue; + } + } + return defaultValue; + } +} \ No newline at end of file diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ReadLockManager.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ReadLockManager.java index 0975400868d..451197e981c 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ReadLockManager.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/ReadLockManager.java @@ -14,12 +14,33 @@ // Oracle - initial API and implementation package org.eclipse.persistence.internal.helper; -import org.eclipse.persistence.internal.helper.type.ReadLockAcquisitionMetadata; - -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Vector; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.eclipse.persistence.internal.helper.type.ReadLockAcquisitionMetadata; + +/** + * This essential class tracks metadata about threads acquiring and releasing read locks. Such information becomes + * crucial during system deadlocks, allowing us to analyze the resources desired by all threads participating in the + * EclipseLink concurrency layer. + * + * When deadlocks occur, we need insights into: - Threads stuck while attempting to acquire write locks. - Threads + * engaged in object building and waiting within the release deferred lock process. - Threads that have acquired cache + * keys for reading but have not yet released them. + * + * This metadata contributes to the big picture of understanding all activities within the concurrency layer. + * + * Refer to {@link org.eclipse.persistence.internal.helper.ConcurrencyManager#READ_LOCK_MANAGERS}, a static hash map + * that provides details for each individual thread working in the concurrency layer, about the read locks it is + * currently acquiring for reading. + */ public class ReadLockManager { public static final int FIRST_INDEX_OF_COLLECTION = 0; @@ -30,19 +51,19 @@ public class ReadLockManager { * the readers count on a cache key whenever * */ - private final Vector readLocks = new Vector<>(1); + protected final Vector readLocks = new Vector<>(1); /** * We have seen that the read locks vector we have is very insufficient. We keep it for now due to the tracing code * that is making use of it. 
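To illustrate the intent of this per-thread bookkeeping in isolation, here is a small hypothetical tracker guarded by a ReentrantLock that records acquisitions per thread id and hands out independent snapshots for diagnostics (String lock ids instead of the real ConcurrencyManager type):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

public class ReadLockTrackerSketch {

    private final Map<Long, List<String>> readLocksByThreadId = new HashMap<>();
    private final ReentrantLock instanceLock = new ReentrantLock();

    public void onReadLockAcquired(String lockId) {
        instanceLock.lock();
        try {
            readLocksByThreadId.computeIfAbsent(Thread.currentThread().getId(), id -> new ArrayList<>())
                    .add(lockId);
        } finally {
            instanceLock.unlock();
        }
    }

    public void onReadLockReleased(String lockId) {
        instanceLock.lock();
        try {
            List<String> held = readLocksByThreadId.get(Thread.currentThread().getId());
            if (held != null) {
                held.remove(lockId);
            }
        } finally {
            instanceLock.unlock();
        }
    }

    // Deep copy taken under the same lock, so dump code never races against the tracker itself.
    public Map<Long, List<String>> snapshot() {
        instanceLock.lock();
        try {
            Map<Long, List<String>> copy = new HashMap<>();
            readLocksByThreadId.forEach((threadId, locks) -> copy.put(threadId, new ArrayList<>(locks)));
            return copy;
        } finally {
            instanceLock.unlock();
        }
    }
}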
But we also want to have a new tracing map with a lot more metadata */ - private final Map> mapThreadToReadLockAcquisitionMetadata = new HashMap<>(); + protected final Map> mapThreadToReadLockAcquisitionMetadata = new HashMap<>(); /** * This is a field that will never be cleared. It will be getting ADDs if any problem is detected when we try remove * a read lock from our tracing. */ - private final List removeReadLockProblemsDetected = new ArrayList<>(); + protected final List removeReadLockProblemsDetected = new ArrayList<>(); private final Lock instanceLock = new ReentrantLock(); @@ -52,17 +73,39 @@ public class ReadLockManager { public void addReadLock(ConcurrencyManager concurrencyManager) { instanceLock.lock(); try { + // (a) paranoia code - make sure we are not being provided a null concurrency manager ever + if(concurrencyManager == null) { + return ; + } + + // (b) Basic variable initializaton final Thread currentThread = Thread.currentThread(); final long currentThreadId = currentThread.getId(); ReadLockAcquisitionMetadata readLockAcquisitionMetadata = ConcurrencyUtil.SINGLETON.createReadLockAcquisitionMetadata(concurrencyManager); + // (c) update the read locks owned by the current thread this.readLocks.add(FIRST_INDEX_OF_COLLECTION, concurrencyManager); + + // (d) make sure our call to get the list of ReadLockAcquisitionMetadata mapThreadToReadLockAcquisitionMetadata for our current thread + // is never null. if (!mapThreadToReadLockAcquisitionMetadata.containsKey(currentThreadId)) { List newList = Collections.synchronizedList(new ArrayList()); mapThreadToReadLockAcquisitionMetadata.put(currentThreadId, newList); } List acquiredReadLocksInCurrentTransactionList = mapThreadToReadLockAcquisitionMetadata.get(currentThreadId); - acquiredReadLocksInCurrentTransactionList.add(FIRST_INDEX_OF_COLLECTION, readLockAcquisitionMetadata); + +// acquiredReadLocksInCurrentTransactionList.add(FIRST_INDEX_OF_COLLECTION, readLockAcquisitionMetadata); + // NOTE: + // This if different than null check here should really not be needed + // This if check is a piece of paranoioa code. 
+ // However we are adding it here because of a Null pointer exception + // org.eclipse.persistence.internal.helper.ReadLockManager.removeReadLock(ReadLockManager.java:84) + // whereby the conclusion of that NPException is that somehow our + // list of ReadLockAcquisitionMetadata was holding NULL entries within it + // this if will certainly not hurt us + if(readLockAcquisitionMetadata!=null) { + acquiredReadLocksInCurrentTransactionList.add(FIRST_INDEX_OF_COLLECTION, readLockAcquisitionMetadata); + } } finally { instanceLock.unlock(); } @@ -79,19 +122,48 @@ public void addReadLock(ConcurrencyManager concurrencyManager) { public void removeReadLock(ConcurrencyManager concurrencyManager) { instanceLock.lock(); try { + // (a) paranoia code - make sure we are not being provided a null concurrency manager ever + if(concurrencyManager == null) { + return ; + } + // (b) cleanup the concurrencyManager out of the vector of readlocks + this.readLocks.remove(concurrencyManager); + + // (c) basic variable initialization to work on the cleanup of the ReadLockAcquisitionMetadata final Thread currentThread = Thread.currentThread(); final long currentThreadId = currentThread.getId(); boolean readLockManagerHasTracingAboutAddedReadLocksForCurrentThread = mapThreadToReadLockAcquisitionMetadata.containsKey(currentThreadId); + // (d) make sure our call to get the list of ReadLockAcquisitionMetadata mapThreadToReadLockAcquisitionMetadata for our current thread + // is never null. if (!readLockManagerHasTracingAboutAddedReadLocksForCurrentThread) { String errorMessage = ConcurrencyUtil.SINGLETON.readLockManagerProblem02ReadLockManageHasNoEntriesForThread(concurrencyManager, currentThreadId); removeReadLockProblemsDetected.add(errorMessage); return; } + // (d) Search for the ReadLockAcquisitionMetadata associated to the current input cache key List readLocksAcquiredDuringCurrentThread = mapThreadToReadLockAcquisitionMetadata.get(currentThreadId); ReadLockAcquisitionMetadata readLockAquisitionMetadataToRemove = null; + boolean anyNullReadLockAcquisitionMetadataEntryFound = false; for (ReadLockAcquisitionMetadata currentReadLockAcquisitionMetadata : readLocksAcquiredDuringCurrentThread) { + // (d.i) + if(currentReadLockAcquisitionMetadata == null) { + // note: it should be impossible to enter this code area here + // the addReadLock appears to be rock solid, we cannot see any way to add a null value into the list of ReadLockAcquisitionMetadata + // however we have had a report of a null pointer exception doing currentReadLockAcquisitionMetadata getCacheKeyWhoseNumberOfReadersThreadIsIncrementing + // at: + // org.eclipse.persistence.internal.helper.ReadLockManager.removeReadLock(ReadLockManager.java:84) ~[eclipselink-2.7.6-weblogic.jar + // which means a NPE in the: + // line of code ConcurrencyManager currentCacheKeyObjectToCheck = currentReadLockAcquisitionMetadata.getCacheKeyWhoseNumberOfReadersThreadIsIncrementing(); + removeReadLockProblemsDetected.add("removeReadLock: we have detected the exisence of a NULL currentReadLockAcquisitionMetadata in the list readLocksAcquiredDuringCurrentThread. " + + " this should be impossible. The add logic code tries to make sure there is no scenario that would allow this to ever happen. 
"); + anyNullReadLockAcquisitionMetadataEntryFound = true; + // skip the next ReadLockAcquisitionMetadata entry in the list + continue; + } + + // (d.ii) check if the currentReadLockAcquisitionMetadata is the one we are trying to remove out the list ConcurrencyManager currentCacheKeyObjectToCheck = currentReadLockAcquisitionMetadata.getCacheKeyWhoseNumberOfReadersThreadIsIncrementing(); boolean dtoToRemoveFound = concurrencyManager.getConcurrencyManagerId() == currentCacheKeyObjectToCheck.getConcurrencyManagerId(); if (dtoToRemoveFound) { @@ -100,6 +172,14 @@ public void removeReadLock(ConcurrencyManager concurrencyManager) { } } + // (e) Paranoia code - if our list of ReadLockAcquisitionMetadata seems to be poisoned with null entries, get rid of those + if(anyNullReadLockAcquisitionMetadataEntryFound) { + readLocksAcquiredDuringCurrentThread.removeAll(Collections.singleton(null)); + readLocks.removeAll(Collections.singleton(null)); + } + + // (f) If we find no ReadLockAcquisitionMetadata associated to the input ConcurrencyManager there is a bug somewhere + // for each read lock in our vector of read locks we should also have added acquisition metadata if (readLockAquisitionMetadataToRemove == null) { String errorMessage = ConcurrencyUtil.SINGLETON.readLockManagerProblem03ReadLockManageHasNoEntriesForThread(concurrencyManager, currentThreadId); removeReadLockProblemsDetected.add(errorMessage); @@ -122,7 +202,10 @@ public void removeReadLock(ConcurrencyManager concurrencyManager) { public List getReadLocks() { instanceLock.lock(); try { - return Collections.unmodifiableList(readLocks); + // Simply returning an unmodifiable list is insufficient. + // We must provide a completely independent list to avoid issues when producing massive dumps. + // These dumps are iterated over in parallel with backend threads that are acquiring and releasing read locks. + return Collections.unmodifiableList(new ArrayList<>(readLocks)); } finally { instanceLock.unlock(); } @@ -144,14 +227,46 @@ public void addRemoveReadLockProblemsDetected(String problemDetected) { } } - /** Getter for {@link #mapThreadToReadLockAcquisitionMetadata} */ + /** Getter for {@link #mapThreadToReadLockAcquisitionMetadata} returns a deep clone */ public Map> getMapThreadToReadLockAcquisitionMetadata() { - return mapThreadToReadLockAcquisitionMetadata; + instanceLock.lock(); + try { + // We cannot simply return an unmodifiable map here. There are two reasons for this: + // 1. The code consuming this unmodifiable map might be surprised by changes to the map itself caused by this + // read lock manager. + // If threads continue to acquire and release read locks, it could impact this object. + // 2. Additionally, the list values contained in the map could also be affected by threads that are reading and + // releasing read locks. + // Our approach should be to provide the ConcurrencyUtil with a safe deep clone of the data structure for its + // massive dumps. + + // (a) Let's start by creating an independent result map that the caller can safely iterate over. 
+ Map> resultMap = new HashMap<>(); + + // (b) depp clone the data strcuture + for (Entry> currentEntry : mapThreadToReadLockAcquisitionMetadata.entrySet()) { + ArrayList deepCopyOfCurrentListOfMetadata = new ArrayList<>(currentEntry.getValue()); + resultMap.put(currentEntry.getKey(), Collections.unmodifiableList(deepCopyOfCurrentListOfMetadata) ); + } + + // (c) even if our result map is deep clone of our internal sate + // we still want to return it as unmodifable so that callers do not have the illusion + // they are able to hack the state of the read lock manager from the outside. + // If any code tris to manipulate the returned clone they should get a blow up to be dispelled of any illiusions + return Collections.unmodifiableMap(resultMap); + } finally { + instanceLock.unlock(); + } } /** Getter for {@link #removeReadLockProblemsDetected} */ public List getRemoveReadLockProblemsDetected() { - return removeReadLockProblemsDetected; + instanceLock.lock(); + try { + return Collections.unmodifiableList(new ArrayList<>(removeReadLockProblemsDetected)) ; + } finally { + instanceLock.unlock(); + } } /** diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/type/ConcurrencyManagerState.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/type/ConcurrencyManagerState.java index 94989c68a9f..74efb1ea186 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/type/ConcurrencyManagerState.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/helper/type/ConcurrencyManagerState.java @@ -14,15 +14,15 @@ // Oracle - initial API and implementation package org.eclipse.persistence.internal.helper.type; -import org.eclipse.persistence.internal.helper.ConcurrencyManager; -import org.eclipse.persistence.internal.helper.DeferredLockManager; -import org.eclipse.persistence.internal.helper.ReadLockManager; +import static java.util.Collections.unmodifiableMap; +import static java.util.Collections.unmodifiableSet; import java.util.Map; import java.util.Set; -import static java.util.Collections.unmodifiableMap; -import static java.util.Collections.unmodifiableSet; +import org.eclipse.persistence.internal.helper.ConcurrencyManager; +import org.eclipse.persistence.internal.helper.DeferredLockManager; +import org.eclipse.persistence.internal.helper.ReadLockManager; public class ConcurrencyManagerState { @@ -84,6 +84,17 @@ public class ConcurrencyManagerState { */ private final Map> mapThreadToObjectIdWithWriteLockManagerChangesClone; + /** + * This field relates to the + * {@link org.eclipse.persistence.internal.sessions.AbstractSession#THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED} + * and to the bug https://github.com/eclipse-ee4j/eclipselink/issues/2094 it allows us to keep an eye on threads + * that are at post-commit phase and are trying to merge their change set into the original objects in the cache. + * When this is taking place some of the cache keys that the merge manager is needing might be locked by other + * threads. This can lead to deadlocks, if our merge manager thread happens the be the owner of cache keys that + * matter to the owner of the cache keys the merge manager will need to acquire. + */ + private Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired; + /** * Create a new ConcurrencyManagerState. 
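The general shape of such a state holder is an immutable snapshot whose getters only hand out unmodifiable views. A minimal sketch with two representative fields and hypothetical, simplified types:

import java.util.Collections;
import java.util.Map;
import java.util.Set;

public final class ConcurrencySnapshotSketch {

    private final Map<String, Set<Long>> threadsStuckAcquiringWriteLocks;
    private final Set<String> threadsWaitingToReleaseDeferredLocks;

    public ConcurrencySnapshotSketch(Map<String, Set<Long>> threadsStuckAcquiringWriteLocks,
                                     Set<String> threadsWaitingToReleaseDeferredLocks) {
        this.threadsStuckAcquiringWriteLocks = threadsStuckAcquiringWriteLocks;
        this.threadsWaitingToReleaseDeferredLocks = threadsWaitingToReleaseDeferredLocks;
    }

    // Unmodifiable views: the dump algorithms may read the snapshot but can never mutate it by accident.
    public Map<String, Set<Long>> getThreadsStuckAcquiringWriteLocks() {
        return Collections.unmodifiableMap(threadsStuckAcquiringWriteLocks);
    }

    public Set<String> getThreadsWaitingToReleaseDeferredLocks() {
        return Collections.unmodifiableSet(threadsWaitingToReleaseDeferredLocks);
    }
}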
* @@ -97,6 +108,7 @@ public class ConcurrencyManagerState { * @param mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone * @param mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey * @param mapThreadToObjectIdWithWriteLockManagerChangesClone + * @param mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired */ public ConcurrencyManagerState( Map readLockManagerMapClone, @@ -108,7 +120,8 @@ public ConcurrencyManagerState( Set setThreadWaitingToReleaseDeferredLocksClone, Map mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone, Map mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey, - Map> mapThreadToObjectIdWithWriteLockManagerChangesClone) { + Map> mapThreadToObjectIdWithWriteLockManagerChangesClone, + Map mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired) { super(); this.readLockManagerMapClone = readLockManagerMapClone; this.deferredLockManagerMapClone = deferredLockManagerMapClone; @@ -120,6 +133,7 @@ public ConcurrencyManagerState( this.mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone = mapThreadsThatAreCurrentlyWaitingToReleaseDeferredLocksJustificationClone; this.mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey = mapOfCacheKeyToDtosExplainingThreadExpectationsOnCacheKey; this.mapThreadToObjectIdWithWriteLockManagerChangesClone = mapThreadToObjectIdWithWriteLockManagerChangesClone; + this.mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired = mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired; } /** Getter for {@link #readLockManagerMapClone} */ @@ -171,4 +185,11 @@ public Map getUnifiedMapOfThreadsStuckTryingToAcquireWriteLockMe public Map getMapThreadToWaitOnAcquireReadLockCloneMethodName() { return unmodifiableMap(mapThreadToWaitOnAcquireReadLockCloneMethodName); } + + /** Getter for {@link #mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired} */ + public Map getMapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired() { + return unmodifiableMap(mapThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired); + } + + } diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java index 30c6ae177a3..fdf5617ca56 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java @@ -14,14 +14,16 @@ // Oracle - initial API and implementation from Oracle TopLink package org.eclipse.persistence.internal.identitymaps; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + import org.eclipse.persistence.exceptions.ConcurrencyException; -import org.eclipse.persistence.internal.helper.*; +import org.eclipse.persistence.internal.helper.ConcurrencyManager; import org.eclipse.persistence.internal.sessions.AbstractRecord; import org.eclipse.persistence.queries.ObjectBuildingQuery; import org.eclipse.persistence.sessions.DatabaseRecord; import org.eclipse.persistence.sessions.Record; -import java.util.concurrent.TimeUnit; /** *

 * Purpose: Container class for storing objects in an IdentityMap.
 * Responsibilities:
    @@ -32,11 +34,52 @@ */ public class CacheKey extends ConcurrencyManager implements Cloneable { + public static final AtomicLong CACHE_KEY_INSTANCE_CREATION_NUMBER_GENERATOR = new AtomicLong(1l); + //These constants are used in extended cache logging to compare cache item creation thread and thread which picking item from the cache public final long CREATION_THREAD_ID = Thread.currentThread().getId(); public final String CREATION_THREAD_NAME = String.copyValueOf(Thread.currentThread().getName().toCharArray()); public final long CREATION_THREAD_HASHCODE = Thread.currentThread().hashCode(); + /** + * A technical instanace creation number we use as "last-second" saving value + * in case the cache key is found to be corrupted with Null Key value as see in {@code TRK-25534} + * we use this value to ensure that the EQUALS and HASH CODE methods never blow up on account of a null corrupted primary key. + */ + protected final long cacheKeyInstanceCreationNumber = CACHE_KEY_INSTANCE_CREATION_NUMBER_GENERATOR.getAndIncrement(); + + //TODO RFELCMAN enum - begin? + /** + * Value we want to set into the field + * {@link #org.eclipse.persistence.internal.identitymaps.CacheKey.primaryKeyAtTimeOfCreation} + * if the cache key instances is created with the no argument constructor. + */ + protected static final String PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_NO_ARGUMENT_CONTRUCTOR_USED = "PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_NO_ARGUMENT_CONTRUCTOR_USED"; + /** + * Value we want to set into the field + * {@link #org.eclipse.persistence.internal.identitymaps.CacheKey.primaryKeyAtTimeOfCreation} + * if the cache key instances is created with a constructor that allows the primary key as an argument but the primary key is null.. + */ + protected static final String PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_A_NULL_PRIMARY_KEY_WAS_PASSED_IN_TO_OBJECT_CONSTRUCTOR = "PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_A_NULL_PRIMARY_KEY_WAS_PASSED_IN_TO_OBJECT_CONSTRUCTOR"; + //TODO RFELCMAN enum - end? + + /** + * This field relates to an unexpected situation see in issue + * {@code TRK-25534} + * whereby the manipualted eclipselink code created to trace the acuisition and contention of concurrency manager + * resources started blowing up during the release of read locks because for whatever reasons + * the cache key bein released from reading managed to find itself with the primary key set to null, + * This is something we never saw before. + * Even though we normally never see the key field set to null since there is no purpose to cache key without a key + * the fact is that it hapened and methods like Equals and HashCode were completely broken. + * One could not longer remove cache keys from Lists, Vectors or check if the cache keys existed in an hash map. + * + * So this field is meant to help us know what was itented purpose with which the key was originally created. + * + * + */ + protected final Object primaryKeyAtTimeOfCreation; + /** The key holds the vector of primary key values for the object. */ protected Object key; @@ -103,10 +146,18 @@ public class CacheKey extends ConcurrencyManager implements Cloneable { * by switching to an interface. 
*/ protected CacheKey(){ + // make sure we trace the primary key for which the cache key was created in an immutable field + this.primaryKeyAtTimeOfCreation = PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_NO_ARGUMENT_CONTRUCTOR_USED; } public CacheKey(Object primaryKey) { this.key = primaryKey; + // make sure we trace the primary key for which the cache key was created in an immutable field + if(primaryKey == null) { + this.primaryKeyAtTimeOfCreation = PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_A_NULL_PRIMARY_KEY_WAS_PASSED_IN_TO_OBJECT_CONSTRUCTOR; + } else { + this.primaryKeyAtTimeOfCreation = primaryKey; + } } public CacheKey(Object primaryKey, Object object, Object lockValue) { @@ -116,6 +167,13 @@ public CacheKey(Object primaryKey, Object object, Object lockValue) { if (object != null) { setObject(object); } + + // make sure we trace the primary key for which the cache key was created in an immutable field + if(primaryKey == null) { + this.primaryKeyAtTimeOfCreation = PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_A_NULL_PRIMARY_KEY_WAS_PASSED_IN_TO_OBJECT_CONSTRUCTOR; + } else { + this.primaryKeyAtTimeOfCreation = primaryKey; + } } public CacheKey(Object primaryKey, Object object, Object lockValue, long readTime, boolean isIsolated) { @@ -127,11 +185,19 @@ public CacheKey(Object primaryKey, Object object, Object lockValue, long readTim } this.readTime = readTime; this.isIsolated = isIsolated; + + // make sure we trace the primary key for which the cache key was created in an immutable field + if(primaryKey == null) { + this.primaryKeyAtTimeOfCreation = PRIMARY_KEY_AT_TIME_OF_CONSTRUCTION_NULL_A_NULL_PRIMARY_KEY_WAS_PASSED_IN_TO_OBJECT_CONSTRUCTOR; + } else { + this.primaryKeyAtTimeOfCreation = primaryKey; + } } /** * Acquire the lock on the cache key object. */ + @Override public void acquire() { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -144,6 +210,7 @@ public void acquire() { * Acquire the lock on the cache key object. For the merge process * called with true from the merge process, if true then the refresh will not refresh the object */ + @Override public void acquire(boolean forMerge) { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -156,6 +223,7 @@ public void acquire(boolean forMerge) { * Acquire the lock on the cache key object. But only if the object has no lock on it * Added for CR 2317 */ + @Override public boolean acquireNoWait() { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -186,6 +254,7 @@ public boolean acquireIfUnownedNoWait() { * Added for CR 2317 * called with true from the merge process, if true then the refresh will not refresh the object */ + @Override public boolean acquireNoWait(boolean forMerge) { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -199,6 +268,7 @@ public boolean acquireNoWait(boolean forMerge) { * Added for CR 2317 * called with true from the merge process, if true then the refresh will not refresh the object */ + @Override public boolean acquireWithWait(boolean forMerge, int wait) { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -210,6 +280,7 @@ public boolean acquireWithWait(boolean forMerge, int wait) { /** * Acquire the deferred lock. */ + @Override public void acquireDeferredLock() { if (this.isIsolated) { this.depth.incrementAndGet(); @@ -256,6 +327,7 @@ public void acquireLock(ObjectBuildingQuery query){ * This can be called to ensure the cache key has a valid built object. * It does not hold a lock, so the object could be refreshed afterwards. 
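The acquire and release overrides above all share one split: isolated cache keys only maintain a depth counter, while shared ones delegate to the real concurrency manager. A simplified sketch of that split, with a plain ReentrantLock as a stand-in for the inherited locking behaviour (acquire/release calls are assumed to be paired on the same thread):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;

public class IsolatedOrSharedLockSketch {

    private final boolean isolated;
    private final AtomicInteger depth = new AtomicInteger();
    private final ReentrantLock sharedLock = new ReentrantLock();

    public IsolatedOrSharedLockSketch(boolean isolated) {
        this.isolated = isolated;
    }

    public void acquire() {
        if (isolated) {
            // isolated entries are only ever used by their owning session, so no real lock is needed
            depth.incrementAndGet();
        } else {
            sharedLock.lock();
        }
    }

    public void release() {
        if (isolated) {
            depth.decrementAndGet();
        } else {
            sharedLock.unlock();
        }
    }
}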
*/ + @Override public void checkReadLock() { if (this.isIsolated) { return; @@ -268,6 +340,7 @@ public void checkReadLock() { * This can be called to ensure the cache key has a valid built object. * It does not hold a lock, so the object could be refreshed afterwards. */ + @Override public void checkDeferredLock() { if (this.isIsolated) { return; @@ -278,6 +351,7 @@ public void checkDeferredLock() { /** * Acquire the read lock on the cache key object. */ + @Override public void acquireReadLock() { if (this.isIsolated) { return; @@ -288,6 +362,7 @@ public void acquireReadLock() { /** * Acquire the read lock on the cache key object. Return true if acquired. */ + @Override public boolean acquireReadLockNoWait() { if (this.isIsolated) { return true; @@ -299,6 +374,7 @@ public boolean acquireReadLockNoWait() { * INTERNAL: * Clones itself. */ + @Override public Object clone() { Object object = null; @@ -316,6 +392,7 @@ public Object clone() { * If anObject is a CacheKey, do further comparison, otherwise, return false. * @see CacheKey#equals(CacheKey) */ + @Override public boolean equals(Object object) { try { return equals((CacheKey)object); @@ -329,10 +406,33 @@ public boolean equals(Object object) { * Use an index compare, because it is much faster than enumerations. */ public boolean equals(CacheKey key) { - if (key.key == null || this.key == null) { + if(key == null) { + // Bugfix related to tracker TRK-25534 + // make sure the key we are comparing against is not the null value + // NPE exception on equals was probably not at all that the key to compare against was null + // but rather that this instance had as primary key the null value since we also saw blow ups in the has code method + // but since it is impossible to tell what data was corrupted we make this code safe to compare against null as well + return false; + } + if (this == key) { + return true; + } + + if (key.key == null) { return false; } - return this.key.equals(key.key); + + // Bugfix related to tracker TRK-25534 + // we do not know how it is possible that a cache key existed in the system with primary key set to null + // but we have seen a stack trace revealing a release acquired read lock piece locking blowing up + // because the HackingEclipseReadLockManager.removeReadLock was dealing with a cache key with null primary key + if(this.key != null) { + return this.key.equals(key.key); + } else { + // we could just return false since the only way to cache keys have the same creation number is if they + // are the same object instance which is already being checked above + return this.getCacheKeyInstanceCreationNumber() == key.getCacheKeyInstanceCreationNumber(); + } } /** @@ -350,9 +450,28 @@ public Object getKey() { return key; } + /** + * Just like the method getKey {@link #key} but this method is skeptical about the fact + * that cache keys are in "sane" state. It assumes that it is possible for cache keys to be corrupted + * into having null primary key and in that case return the value of the primary key at the ime of creation. + * @return The primary key associated to this cache key if it is not null. If it is null than we return a the primary key at the time of creation of this instance object. 
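In the same spirit, a stripped-down sketch of this "always remember something non-null about the key" idea, using an AtomicLong instance counter and a sentinel recorded at construction time (class and constant names are hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class KeyProvenanceSketch {

    // Process-wide counter, bumped once per constructed instance.
    static final AtomicLong CREATION_NUMBER_GENERATOR = new AtomicLong(1L);
    static final String NULL_KEY_SENTINEL = "KEY_WAS_NULL_AT_CONSTRUCTION";

    final long creationNumber = CREATION_NUMBER_GENERATOR.getAndIncrement();
    final Object keyAtTimeOfCreation;

    KeyProvenanceSketch(Object key) {
        // Record either the real key or a non-null sentinel, so diagnostics always have
        // something meaningful to print even if the mutable key field later turns up null.
        this.keyAtTimeOfCreation = (key != null) ? key : NULL_KEY_SENTINEL;
    }
}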
+ */ + public Object getKeyEnsuredToBeNotNull() { + // (a) the default pattern here is to simply return the primary key that was set on the cache key object + if(this.key != null) { + return this.key; + } + // (b) however, if for some unexpected reason the key of the cache key has be cleared and set to null + // we want to return here the Non null value that we setup into this cache key when the object was originally created + // this value is guarnteed to be not null since if the constructor has null primary key we have used a string to indicate + // that the cache key was created with null primary key + return this.getPrimaryKeyAtTimeOfCreation(); + } + /** * Return the active thread. */ + @Override public Thread getActiveThread() { if (this.isIsolated) { if (this.depth.get() > 0) { @@ -403,8 +522,21 @@ public Object getWriteLockValue() { /** * Overrides hashCode() in Object to use the primaryKey's hashCode for storage in data structures. */ + @Override public int hashCode() { - return this.key.hashCode(); + if(this.key != null) { + return this.key.hashCode(); + } else { + // relates to TRK-25534 + // We do not know how eclipselink managed to get itself into this situation but we have + // seen null pointer exceptions taking place while trying to build a massive dump justification on account of + // cache keys found in the read lock manager where the with primary key was set to null + // so we need to make this code safe + final int prime = 31; + int result = 1; + result = prime * result + (int) (cacheKeyInstanceCreationNumber ^ (cacheKeyInstanceCreationNumber >>> 32)); + return result; + } } /** @@ -454,6 +586,7 @@ public int getInvalidationState() { /** * Release the lock on the cache key object. */ + @Override public void release() { if (this.isIsolated) { this.depth.decrementAndGet(); @@ -465,6 +598,7 @@ public void release() { /** * Release the deferred lock */ + @Override public void releaseDeferredLock() { if (this.isIsolated) { this.depth.decrementAndGet(); @@ -476,6 +610,7 @@ public void releaseDeferredLock() { /** * Release the read lock on the cache key object. 
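Putting the null-key hardening together, a minimal sketch of equals and hashCode that prefers the primary key but falls back to the creation number whenever the key is unexpectedly null (illustrative only, not the actual CacheKey code):

public class NullSafeKeySketch {

    private final Object key;          // may unexpectedly be null in the corrupted scenario
    private final long creationNumber; // always set, see the instance counter sketch above

    public NullSafeKeySketch(Object key, long creationNumber) {
        this.key = key;
        this.creationNumber = creationNumber;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (!(other instanceof NullSafeKeySketch)) {
            return false;
        }
        NullSafeKeySketch that = (NullSafeKeySketch) other;
        if (this.key != null && that.key != null) {
            return this.key.equals(that.key);
        }
        // fall back to the unique creation number so a corrupted (null) key can never cause an NPE
        return this.creationNumber == that.creationNumber;
    }

    @Override
    public int hashCode() {
        return (key != null) ? key.hashCode() : Long.hashCode(creationNumber);
    }
}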
*/ + @Override public void releaseReadLock() { if (this.isIsolated) { return; @@ -552,13 +687,19 @@ public void setWriteLockValue(Object writeLockValue) { this.writeLockValue = writeLockValue; } + @Override public String toString() { int hashCode = 0; if (getObject() != null) { hashCode = getObject().hashCode(); } - return "[" + getKey() + ": " + hashCode + ": " + getWriteLockValue() + ": " + getReadTime() + ": " + getObject() + "]"; + return "[" + getKey() + ": " + hashCode + ": " + getWriteLockValue() + ": " + getReadTime() + ": " + getObject() + + // new fields introduced on account of the TRK-25534 + + " cacheKeyInstanceCreationNumber " + this.getCacheKeyInstanceCreationNumber() + + " primaryKeyAtTimeOfCreation " + this.getPrimaryKeyAtTimeOfCreation() + + "]"; } /** @@ -604,4 +745,14 @@ public Object waitForObject() { getInstanceLock().unlock(); } } + + /** Getter for {@link #cacheKeyInstanceCreationNumber} */ + public long getCacheKeyInstanceCreationNumber() { + return cacheKeyInstanceCreationNumber; + } + + /** Getter for {@link #primaryKeyAtTimeOfCreation} */ + public Object getPrimaryKeyAtTimeOfCreation() { + return primaryKeyAtTimeOfCreation; + } } diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java.ORIGIN b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java.ORIGIN new file mode 100644 index 00000000000..30c6ae177a3 --- /dev/null +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/identitymaps/CacheKey.java.ORIGIN @@ -0,0 +1,607 @@ +/* + * Copyright (c) 1998, 2024 Oracle and/or its affiliates. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v. 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0, + * or the Eclipse Distribution License v. 1.0 which is available at + * http://www.eclipse.org/org/documents/edl-v10.php. + * + * SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause + */ + +// Contributors: +// Oracle - initial API and implementation from Oracle TopLink +package org.eclipse.persistence.internal.identitymaps; + +import org.eclipse.persistence.exceptions.ConcurrencyException; +import org.eclipse.persistence.internal.helper.*; +import org.eclipse.persistence.internal.sessions.AbstractRecord; +import org.eclipse.persistence.queries.ObjectBuildingQuery; +import org.eclipse.persistence.sessions.DatabaseRecord; +import org.eclipse.persistence.sessions.Record; + +import java.util.concurrent.TimeUnit; +/** + *

Purpose: Container class for storing objects in an IdentityMap. + * Responsibilities: + * • Hold key and object. + * • Maintain and update the current writeLockValue.
    + * @since TOPLink/Java 1.0 + */ +public class CacheKey extends ConcurrencyManager implements Cloneable { + + //These constants are used in extended cache logging to compare cache item creation thread and thread which picking item from the cache + public final long CREATION_THREAD_ID = Thread.currentThread().getId(); + public final String CREATION_THREAD_NAME = String.copyValueOf(Thread.currentThread().getName().toCharArray()); + public final long CREATION_THREAD_HASHCODE = Thread.currentThread().hashCode(); + + /** The key holds the vector of primary key values for the object. */ + protected Object key; + + protected Object object; + + //used to store a reference to the map this cachekey is in in cases where the + //cache key is to be removed, prevents us from having to track down the owning + //map + protected IdentityMap mapOwner; + + /** The writeLock value is being held as an object so that it might contain a number or timestamp. */ + protected Object writeLockValue; + + /** The cached wrapper for the object, used in EJB. */ + protected Object wrapper; + + /** This is used for Document Preservation to cache the record that this object was built from */ + protected Record record; + + /** This attribute is the system time in milli seconds that the object was last refreshed on */ + + //CR #4365 + // CR #2698903 - fix for the previous fix. No longer using millis. + protected long lastUpdatedQueryId; + + /** Invalidation State can be used to indicate whether this cache key is considered valid */ + protected int invalidationState = CHECK_INVALIDATION_POLICY; + + /** The following constants are used for the invalidationState variable */ + public static final int CHECK_INVALIDATION_POLICY = 0; + public static final int CACHE_KEY_INVALID = -1; + + public static final int MAX_WAIT_TRIES = 10000; + + /** The read time stores the millisecond value of the last time the object help by + this cache key was confirmed as up to date. */ + protected long readTime = 0; + + /** + * Stores if this CacheKey instance is a wrapper for the underlying CacheKey. CacheKey wrappers + * may be used with cache interceptors. + */ + protected boolean isWrapper = false; + + /** + * Stores retrieved FK values for relationships that are not stored in the Entity + */ + protected AbstractRecord protectedForeignKeys; + + /** + * Set to true if this CacheKey comes from an IsolatedClientSession, or DatabaseSessionImpl. + */ + protected boolean isIsolated; + + /** + * The ID of the database transaction that last wrote the object. + * This is used for database change notification. + */ + protected Object transactionId; + + /** + * Internal: + * Only used by subclasses that may want to wrap the cache key. Could be replaced + * by switching to an interface. 
+ */ + protected CacheKey(){ + } + + public CacheKey(Object primaryKey) { + this.key = primaryKey; + } + + public CacheKey(Object primaryKey, Object object, Object lockValue) { + this.key = primaryKey; + this.writeLockValue = lockValue; + //bug4649617 use setter instead of this.object = object to avoid hard reference on object in subclasses + if (object != null) { + setObject(object); + } + } + + public CacheKey(Object primaryKey, Object object, Object lockValue, long readTime, boolean isIsolated) { + this.key = primaryKey; + this.writeLockValue = lockValue; + //bug4649617 use setter instead of this.object = object to avoid hard reference on object in subclasses + if (object != null) { + setObject(object); + } + this.readTime = readTime; + this.isIsolated = isIsolated; + } + + /** + * Acquire the lock on the cache key object. + */ + public void acquire() { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return; + } + super.acquire(false); + } + + /** + * Acquire the lock on the cache key object. For the merge process + * called with true from the merge process, if true then the refresh will not refresh the object + */ + public void acquire(boolean forMerge) { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return; + } + super.acquire(forMerge); + } + + /** + * Acquire the lock on the cache key object. But only if the object has no lock on it + * Added for CR 2317 + */ + public boolean acquireNoWait() { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return true; + } + return super.acquireNoWait(false); + } + + /** + * Acquire the lock on the cache key object. Only acquire a lock if the cache key's + * active thread is not set. + * Added for Bug 5840635 + */ + + public boolean acquireIfUnownedNoWait() { + if (this.isIsolated) { + if (this.depth.get() > 0) { + return false; + } + this.depth.incrementAndGet(); + return true; + } + return super.acquireIfUnownedNoWait(false); + } + + /** + * Acquire the lock on the cache key object. But only if the object has no lock on it + * Added for CR 2317 + * called with true from the merge process, if true then the refresh will not refresh the object + */ + public boolean acquireNoWait(boolean forMerge) { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return true; + } + return super.acquireNoWait(forMerge); + } + + /** + * Acquire the lock on the cache key object. But only if the object has no lock on it + * Added for CR 2317 + * called with true from the merge process, if true then the refresh will not refresh the object + */ + public boolean acquireWithWait(boolean forMerge, int wait) { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return true; + } + return super.acquireWithWait(forMerge, wait); + } + + /** + * Acquire the deferred lock. + */ + public void acquireDeferredLock() { + if (this.isIsolated) { + this.depth.incrementAndGet(); + return; + } + super.acquireDeferredLock(); + } + + public void acquireLock(ObjectBuildingQuery query){ + + // PERF: Only use deferred locking if required. + // CR#3876308 If joining is used, deferred locks are still required. + if (query.requiresDeferredLocks()) { + this.acquireDeferredLock(); + + int counter = 0; + while ((this.object == null) && (counter < 1000)) { + if (this.getActiveThread() == Thread.currentThread()) { + break; + } + //must release lock here to prevent acquiring multiple deferred locks but only + //releasing one at the end of the build object call. 
+ //bug 5156075 + this.releaseDeferredLock(); + //sleep and try again if we are not the owner of the lock for CR 2317 + // prevents us from modifying a cache key that another thread has locked. + try { + Thread.sleep(10); + } catch (InterruptedException exception) { + } + this.acquireDeferredLock(); + counter++; + } + if (counter == 1000) { + throw ConcurrencyException.maxTriesLockOnBuildObjectExceded(this.getActiveThread(), Thread.currentThread()); + } + } else { + this.acquire(); + } + } + + /** + * Check the read lock on the cache key object. + * This can be called to ensure the cache key has a valid built object. + * It does not hold a lock, so the object could be refreshed afterwards. + */ + public void checkReadLock() { + if (this.isIsolated) { + return; + } + super.checkReadLock(); + } + + /** + * Check the deferred lock on the cache key object. + * This can be called to ensure the cache key has a valid built object. + * It does not hold a lock, so the object could be refreshed afterwards. + */ + public void checkDeferredLock() { + if (this.isIsolated) { + return; + } + super.checkDeferredLock(); + } + + /** + * Acquire the read lock on the cache key object. + */ + public void acquireReadLock() { + if (this.isIsolated) { + return; + } + super.acquireReadLock(); + } + + /** + * Acquire the read lock on the cache key object. Return true if acquired. + */ + public boolean acquireReadLockNoWait() { + if (this.isIsolated) { + return true; + } + return super.acquireReadLockNoWait(); + } + + /** + * INTERNAL: + * Clones itself. + */ + public Object clone() { + Object object = null; + + try { + object = super.clone(); + } catch (Exception exception) { + throw new InternalError(exception.toString()); + } + + return object; + } + + /** + * Determine if the receiver is equal to anObject. + * If anObject is a CacheKey, do further comparison, otherwise, return false. + * @see CacheKey#equals(CacheKey) + */ + public boolean equals(Object object) { + try { + return equals((CacheKey)object); + } catch (ClassCastException incorrectType) { + return false; + } + } + + /** + * Determine if the receiver is equal to key. + * Use an index compare, because it is much faster than enumerations. + */ + public boolean equals(CacheKey key) { + if (key.key == null || this.key == null) { + return false; + } + return this.key.equals(key.key); + } + + /** + * INTERNAL: + * This method returns the system time in millis seconds at which this object was last refreshed + * CR #4365 + * CR #2698903 ... instead of using millis we will now use id's instead. Method + * renamed appropriately. + */ + public long getLastUpdatedQueryId() { + return this.lastUpdatedQueryId; + } + + public Object getKey() { + return key; + } + + /** + * Return the active thread. + */ + public Thread getActiveThread() { + if (this.isIsolated) { + if (this.depth.get() > 0) { + return Thread.currentThread(); + } else { + return null; + } + } + return super.getActiveThread(); + } + + public Object getObject() { + return object; + } + + public IdentityMap getOwningMap(){ + return this.mapOwner; + } + + /** + * INTERNAL: + * Return the current value of the Read Time variable + */ + public long getReadTime() { + return readTime; + } + + public Record getRecord() { + return record; + } + + public Object getWrapper() { + return wrapper; + } + + /** + * If a Wrapper subclasses this CacheKey this method will be used to unwrap the cache key. 
+ * @return + */ + public CacheKey getWrappedCacheKey(){ + return this; + } + + public Object getWriteLockValue() { + return writeLockValue; + } + + /** + * Overrides hashCode() in Object to use the primaryKey's hashCode for storage in data structures. + */ + public int hashCode() { + return this.key.hashCode(); + } + + /** + * Returns true if the protectedForeignKeys record is non-null and non-empty, false otherwise. + */ + public boolean hasProtectedForeignKeys() { + return (this.protectedForeignKeys != null) && (this.protectedForeignKeys.size() > 0); + } + + /** + * Returns true if this CacheKey is from an IsolatedClientSession + */ + public boolean isIsolated() { + return isIsolated; + } + + /** + * Returns true if this Instance of CacheKey is a wrapper and should be unwrapped before passing + * to IdentityMap APIs. Wrapped CacheKeys may be used in the Cache Interceptors. + */ + public boolean isWrapper(){ + return this.isWrapper; + } + + /** + * INTERNAL: + * Return the FK cache + */ + public AbstractRecord getProtectedForeignKeys(){ + if (this.protectedForeignKeys == null){ + this.protectedForeignKeys = new DatabaseRecord(); + } + return this.protectedForeignKeys; + } + + /** + * INTERNAL: + * Return the value of the invalidationState Variable + * The return value will be a constant + * CHECK_INVALIDATION_POLICY - The Invalidation policy is must be checked for this cache key's sate + * CACHE_KEY_INVALID - This cache key has been labeled invalid. + */ + public int getInvalidationState() { + return invalidationState; + } + + /** + * Release the lock on the cache key object. + */ + public void release() { + if (this.isIsolated) { + this.depth.decrementAndGet(); + return; + } + super.release(); + } + + /** + * Release the deferred lock + */ + public void releaseDeferredLock() { + if (this.isIsolated) { + this.depth.decrementAndGet(); + return; + } + super.releaseDeferredLock(); + } + + /** + * Release the read lock on the cache key object. + */ + public void releaseReadLock() { + if (this.isIsolated) { + return; + } + super.releaseReadLock(); + } + + /** + * Removes this cacheKey from the owning map + */ + public Object removeFromOwningMap(){ + if (getOwningMap() != null){ + return getOwningMap().remove(this); + } + return null; + } + + /** + * INTERNAL: + * Set the value of the invalidationState Variable + * The possible values are from an enumeration of constants + * CHECK_INVALIDATION_POLICY - The invalidation policy is must be checked for this cache key's sate + * CACHE_KEY_INVALID - This cache key has been labelled invalid. + */ + public void setInvalidationState(int invalidationState) { + this.invalidationState = invalidationState; + } + + /** + * INTERNAL: + * This method sets the system time in millis seconds at which this object was last refreshed + * CR #4365 + * CR #2698903 ... instead of using millis we will now use ids instead. Method + * renamed appropriately. 
+ */ + public void setLastUpdatedQueryId(long id) { + this.lastUpdatedQueryId = id; + } + + public void setKey(Object key) { + this.key = key; + } + + public void setObject(Object object) { + this.object = object; + } + + public void setOwningMap(IdentityMap map){ + this.mapOwner = map; + } + + public void setProtectedForeignKeys(AbstractRecord protectedForeignKeys) { + this.protectedForeignKeys = protectedForeignKeys; + } + + /** + * INTERNAL: + * Set the read time of this cache key + */ + public void setReadTime(long readTime) { + this.readTime = readTime; + invalidationState = CHECK_INVALIDATION_POLICY; + } + + public void setRecord(Record newRecord) { + this.record = newRecord; + } + + public void setWrapper(Object wrapper) { + this.wrapper = wrapper; + } + + public void setWriteLockValue(Object writeLockValue) { + this.writeLockValue = writeLockValue; + } + + public String toString() { + int hashCode = 0; + if (getObject() != null) { + hashCode = getObject().hashCode(); + } + + return "[" + getKey() + ": " + hashCode + ": " + getWriteLockValue() + ": " + getReadTime() + ": " + getObject() + "]"; + } + + /** + * Notifies that cache key that it has been accessed. + * Allows the LRU sub-cache to be maintained. + */ + public void updateAccess() { + // Nothing required by default. + } + + public void setIsolated(boolean isIsolated) { + this.isIsolated = isIsolated; + } + + public void setIsWrapper(boolean isWrapper) { + this.isWrapper = isWrapper; + } + + public Object getTransactionId() { + return transactionId; + } + + public void setTransactionId(Object transactionId) { + this.transactionId = transactionId; + } + + public Object waitForObject() { + getInstanceLock().lock(); + try { + try { + int count = 0; + while (this.object == null && isAcquired()) { + if (count > MAX_WAIT_TRIES) + throw ConcurrencyException.maxTriesLockOnBuildObjectExceded(getActiveThread(), Thread.currentThread()); + getInstanceLockCondition().await(10, TimeUnit.MILLISECONDS); + ++count; + } + } catch (InterruptedException ex) { + //ignore as the loop is broken + } + return this.object; + } finally { + getInstanceLock().unlock(); + } + } +} diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java index f217f11463c..ea730eab527 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024 Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, 2021 IBM Corporation. All rights reserved. * * This program and the accompanying materials are made available under the @@ -136,12 +136,13 @@ public class TraceLocalizationResource extends ListResourceBundle { + " The thread has been stuck for: ({3} ms) \n " + " Bellow we will describe the ActiveLocks, DeferredLocks and ReadLocks for this thread. " }, { "concurrency_util_owned_cache_key_null", "ObjectNull. 
Most likely not yet in server session cache and in the process of being created."}, - { "concurrency_util_owned_cache_key_is_cache_key", "--- CacheKey ({0}): (primaryKey: {1}) (object: {2}) (object hash code: {3}) (cacheKeyClass: {4}) (cacheKey hash code: {5}) (current cache key owner/activeThread: {6}) (getNumberOfReaders: {7}) " - + " (concurrencyManagerId: {8}) (concurrencyManagerCreationDate: {9})" - + " (totalNumberOfTimeCacheKeyAcquiredForReading: {10}) " - + " (totalNumberOfTimeCacheKeyReleasedForReading: {11}) " - + " (totalNumberOfTimeCacheKeyReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero: {12}) " - + "(depth: {13}) ---"}, + { "concurrency_util_owned_cache_key_is_cache_key", "--- CacheKey ({0}): (primaryKey: {1}) (object hash code: {2}) (cacheKeyClass: {3}) (cacheKey hash code: {4}) (current cache key owner/activeThread: {5}) (getNumberOfReaders: {6}) " + + " (concurrencyManagerId: {7}) (concurrencyManagerCreationDate: {8})" + + " (totalNumberOfTimeCacheKeyAcquiredForReading: {9}) " + + " (totalNumberOfTimeCacheKeyReleasedForReading: {10}) " + + " (totalNumberOfTimeCacheKeyReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero: {11}) " + + "(depth: {12})" + + " (cacheKeyInstanceCreationNumber: {13}) (primaryKeyAtTimeOfCreation: {14}) ---"}, { "concurrency_util_owned_cache_key_is_not_cache_key", "--- ConcurrencyManager: (ConcurrencyManagerClass: {0} ) (ConcurrencyManagerToString: {1}) (current cache key owner/activeThread: {2}) (concurrencyManagerId: {3}) (concurrencyManagerCreationDate: {4}) " + " (totalNumberOfTimeCacheKeyAcquiredForReading: {5}) " + " (totalNumberOfTimeCacheKeyReleasedForReading: {6}) " @@ -269,6 +270,17 @@ public class TraceLocalizationResource extends ListResourceBundle { + " stopping the candidate thread to make progress... We expect this code spot to never be invoked. " + " Either this thread made progress or if it continues to be stuck in the releaseDeferredLock " + " we most likely have an implementation bug somewhere. "}, + { "concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_page_header" + , "Concurrency manager - Page 08 start - Threads in MergeManager Acquiring Cache Keys for Clones to be merged into eclipselink server session cache of originals" + + "\n This section provides information about threads within the MergeManager that require cache keys for merging clones with changes." + + "\n Specifically, it focuses on the threads working in the context of an ObjectChangeSet where the server session CacheKey is found to still have CacheKey.object null," + + "\n and the CacheKey is acquired by a competing thread (typically an ObjectBuilder thread)."
+ + "\n see https://github.com/eclipse-ee4j/eclipselink/issues/2094 " + + "\nTotal number of threads waiting to see lock being released: {0}\n\n"}, + { "concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_justification" + , "[currentThreadNumber: {0}] [ThreadName: {1}]: Justification for being stuck: {2}\n"}, + { "concurrency_util_create_information_threads_having_difficulty_getting_cache_keys_with_object_different_than_null_during_merge_clones_to_cache_after_transaction_commit_page_end" + , "Concurrency manager - Page 08 end - Threads in MergeManager Acquiring Cache Keys for Clones to be merged into eclipselink server session cache of originals\n"}, { "XML_call", "XML call" }, { "XML_data_call", "XML data call" }, { "XML_data_delete", "XML data delete" }, diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java.ORIGIN b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java.ORIGIN new file mode 100644 index 00000000000..f217f11463c --- /dev/null +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/localization/i18n/TraceLocalizationResource.java.ORIGIN @@ -0,0 +1,557 @@ +/* + * Copyright (c) 1998, 2021 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2021 IBM Corporation. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v. 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0, + * or the Eclipse Distribution License v. 1.0 which is available at + * http://www.eclipse.org/org/documents/edl-v10.php. + * + * SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause + */ + +// Contributors: +// Oracle - initial API and implementation from Oracle TopLink +// 09/13/2013-2.5 Guy Pelletier +// - 350487: JPA 2.1 Specification defined support for Stored Procedure Calls +// 11/07/2017 - Dalia Abo Sheasha +// - 526957 : Split the logging and trace messages +// 05/11/2020-2.7.0 Jody Grassel +// - 538296: Wrong month is returned if OffsetDateTime is used in JPA 2.2 code +package org.eclipse.persistence.internal.localization.i18n; + +import java.util.ListResourceBundle; + +/** + * English ResourceBundle for TraceLocalization messages. Traces are only localized. They do not get translated. + * These logs are picked up by AbstractSessionLog.getLog().log() when the level is below CONFIG=4 (FINE, FINER, FINEST, ALL) + * @author Shannon Chen + * @since OracleAS TopLink 10g (9.0.4) + */ +public class TraceLocalizationResource extends ListResourceBundle { + static final Object[][] contents = { + // FINEST, FINER and FINE level messages. 
+ { "login_successful", "{0} login successful" }, + { "logout_successful", "{0} logout successful" }, + { "acquire_unit_of_work_with_argument", "acquire unit of work: {0}" }, + { "external_transaction_has_begun_internally", "external transaction has begun internally" }, + { "external_transaction_has_committed_internally", "external transaction has committed internally" }, + { "initialize_all_identitymaps", "initialize all identitymaps" }, + { "initialize_identitymap", "initialize identitymap: {0}" }, + { "initialize_identitymaps", "initialize identitymaps" }, + { "external_transaction_has_rolled_back_internally", "external transaction has rolled back internally" }, + { "validate_cache", "validate cache." }, + { "cache_item_creation", "Entity ({0}) with Id ({1}) was stored in the cache by thread (Id: {2} Name: {3})" }, + { "cache_item_refresh", "Entity ({0}) with Id ({1}) was refreshed in the cache by thread (Id: {2} Name: {3})" }, + { "cache_item_removal", "Entity ({0}) with Id ({1}) was removed from the cache by thread (Id: {2} Name: {3})" }, + { "cache_item_invalidation", "Entity ({0}) with Id ({1}) was invalidated from the cache by thread (Id: {2} Name: {3})" }, + { "cache_class_invalidation", "Entities based on class ({0}) was invalidated from the cache by thread (Id: {1} Name: {2})" }, + { "cache_hit", "Cache hit for entity ({0}) with Id ({1})" }, + { "cache_miss", "Cache miss for entity ({0}) with Id ({1})" }, + { "stack_of_visited_objects_that_refer_to_the_corrupt_object", "stack of visited objects that refer to the corrupt object: {0}" }, + { "corrupt_object_referenced_through_mapping", "corrupt object referenced through mapping: {0}" }, + { "corrupt_object", "corrupt object: {0}" }, + { "begin_unit_of_work_flush", "begin unit of work flush" }, + { "end_unit_of_work_flush", "end unit of work flush" }, + { "begin_unit_of_work_commit", "begin unit of work commit" }, + { "end_unit_of_work_commit", "end unit of work commit" }, + { "resume_unit_of_work", "resume unit of work" }, + { "resuming_unit_of_work_from_failure", "resuming unit of work from failure" }, + { "release_unit_of_work", "release unit of work" }, + { "revert_unit_of_work", "revert unit of work" }, + { "validate_object_space", "validate object space." 
}, + { "execute_query", "Execute query {0}" }, + { "merge_clone", "Merge clone {0} " }, + { "merge_clone_with_references", "Merge clone with references {0}" }, + { "new_instance", "New instance {0}" }, + { "register_existing", "Register the existing object {0}" }, + { "register_new", "Register the new object {0}" }, + { "register_new_bean", "Register the new bean {0}" }, + { "register", "Register the object {0}" }, + { "revert", "Revert the object''s attributes {0}" }, + { "unregister", "Unregister the object {0}" }, + { "begin_batch_statements", "Begin batch statements" }, + { "end_batch_statements", "End Batch Statements" }, + { "query_column_meta_data_with_column", "query column meta data ({0}.{1}.{2}.{3})" }, + { "query_column_meta_data", "query table meta data ({0}.{1}.{2})" }, + { "reconnecting_to_external_connection_pool", "reconnecting to external connection pool" }, + { "connecting", "connecting({0})" }, + { "disconnect", "disconnect" }, + { "reconnecting", "reconnecting({0})" }, + { "begin_transaction", "begin transaction" }, + { "commit_transaction", "commit transaction" }, + { "rollback_transaction", "rollback transaction" }, + { "adapter_result", "Adapter result: {0}" }, + { "data_access_result", "Data access result: {0}" }, + { "acquire_unit_of_work", "acquire unit of work" }, + { "JTS_register", "JTS register" }, + { "JTS_after_completion", "After JTS Completion" }, + { "JTS_before_completion", "Before JTS Completion" }, + { "JTS_begin", "Begin JTS transaction" }, + { "JTS_commit_with_argument", "JTS#commit({0})" }, + { "JTS_rollback", "Rollback JTS transaction." }, + { "JTS_commit", "Commit JTS transaction." }, + { "JTS_after_completion_with_argument", "After JTS Completion ({0})" }, + { "TX_beforeCompletion", "TX beforeCompletion callback, status={0}" }, + { "TX_afterCompletion", "TX afterCompletion callback, status={0}" }, + { "TX_bind", "TX binding to tx mgr, status={0}" }, + { "TX_begin", "TX beginTransaction, status={0}" }, + { "TX_beginningTxn", "TX Internally starting" }, + { "TX_commit", "TX commitTransaction, status={0}" }, + { "TX_committingTxn", "TX Internally committing" }, + { "TX_rollback", "TX rollbackTransaction, status={0}" }, + { "TX_rollingBackTxn", "TX Internally rolling back" }, + { "lock_writer_header", "Current object locks:" }, + { "lock_writer_footer", "End of locked objects." }, + { "active_thread", "Thread : {0}" }, + { "locked_object", "Locked Object : {0}" }, + { "depth", "Depth : {0}" }, + { "cachekey_released", "This thread is no longer holding the lock. It must not be a blocking thread."}, + { "cache_thread_info", "Cached entity ({0}) with Id ({1}) was prepared and stored into cache by another thread (id: {2} name: {3}), than current thread (id: {4} name: {5})" }, + { "deferred_locks", "Deferred lock on : {0}" }, + { "deferred_locks_released", "All deferred locks for thread \"{0}\" have been released." }, + { "acquiring_deferred_lock", "Thread \"{1}\" has acquired a deferred lock on object : {0} in order to avoid deadlock." }, + { "dead_lock_encountered_on_write", "Thread \"{1}\" encountered deadlock when attempting to lock : {0}. Entering deadlock avoidance algorithm." }, + { "dead_lock_encountered_on_write_no_cache_key", "Thread \"{2}\" encountered deadlock when attempting to lock object of class: {0} with PK {1}. Entering deadlock avoidance algorithm." }, + { "concurrency_manager_release_locks_acquired_by_thread_1", "releaseAllLocksAcquiredByThread: Thread \"{0}\" .The Lock manager is null. This might be an acquire operation. 
So not possible to lockManager.releaseActiveLocksOnThread(). Cache Key: \"{1}\"" }, + { "concurrency_manager_release_locks_acquired_by_thread_2", "releaseAllLocksAcquiredByThread: Release active locks on Thread \"{0}\"" }, + { "concurrency_manager_build_object_thread_complete_1", "isBuildObjectComplete ExpandedThread NR {0}: {1} \n" }, + { "concurrency_manager_build_object_thread_complete_2", "\nAll threads in this stack are doing object building and needed to defer on one or more cache keys.\n" + + "The last thread has deferred lock on ac cache key that is acquired by thread that is not yet finished with its work. \n\n"}, + { "concurrency_manager_build_object_thread_complete_3", "finalDeferredLockCausingTrouble: {0} \n" + + " This cache key had to be deferred by the last thread on the recursive stack. The thread was ACQUIRED. \n"}, + { "concurrency_manager_build_object_thread_complete_4", "activeThreadOnTheCacheKey: {0} \n" + + " hasDeferredLockManager: {1} \n " + + " This is the thread that has acquired the cache key and has been considered to not yet be finished with its business. \n" + + " When hasDeferredLockManager is true it typically means this thread is doing object building. \n" + + " When hasDeferredLockManager is false it might an object building thread or it could be a thread doing a commit and acquiring final locks to merge its objects with changesets look at the stack trace to understand. \n"}, + { "concurrency_manager_allow_concurrency_exception_fired_up", "allowConcurrencyExceptionToBeFiredUp: is set to FALSE." + + " No any exception be fired to avoid the risk of aborting the current thread not being sufficient to resolve any dead lock." + + " and leaving the system in a worth shape where aver 3 retries the business transaction is not re-attempted and the recovery of the system becomes complicated. " + + " We are going with the option of letting the system freeze. " }, + { "concurrency_util_stuck_thread_tiny_log_cache_key", "Stuck thread problem: unique tiny message number ({0}) \n" + + " The Thread [{1}] appears to be stuck (possible dead lock ongoing). \n" + + " The thread is working in the context of (CacheKey) = ({2}) . \n" + + " The thread has been stuck for: ({3} ms) \n " + + " Bellow we will describe the ActiveLocks, DeferredLocks and ReadLocks for this thread. " }, + { "concurrency_util_owned_cache_key_null", "ObjectNull. 
Most likely not yet in server session cache and in the process of being created."}, + { "concurrency_util_owned_cache_key_is_cache_key", "--- CacheKey ({0}): (primaryKey: {1}) (object: {2}) (object hash code: {3}) (cacheKeyClass: {4}) (cacheKey hash code: {5}) (current cache key owner/activeThread: {6}) (getNumberOfReaders: {7}) " + + " (concurrencyManagerId: {8}) (concurrencyManagerCreationDate: {9})" + + " (totalNumberOfTimeCacheKeyAcquiredForReading: {10}) " + + " (totalNumberOfTimeCacheKeyReleasedForReading: {11}) " + + " (totalNumberOfTimeCacheKeyReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero: {12}) " + + "(depth: {13}) ---"}, + { "concurrency_util_owned_cache_key_is_not_cache_key", "--- ConcurrencyManager: (ConcurrencyManagerClass: {0} ) (ConcurrencyManagerToString: {1}) (current cache key owner/activeThread: {2}) (concurrencyManagerId: {3}) (concurrencyManagerCreationDate: {4}) " + + " (totalNumberOfTimeCacheKeyAcquiredForReading: {5}) " + + " (totalNumberOfTimeCacheKeyReleasedForReading: {6}) " + + " (totalNumberOfTimeCacheKeyReleasedForReadingBlewUpExceptionDueToCacheKeyHavingReachedCounterZero: {7}) " + + "(depth: {8}) ---"}, + { "concurrency_util_header_current_cache_key", "Summary current cache key of thread {0} "}, + { "concurrency_util_header_active_locks_owned_by_thread", "Summary of active locks owned by thread {0} "}, + { "concurrency_util_header_deferred_locks_owned_by_thread", "Summary of deferred locks (could not be acquired and cause thread to wait for object building to complete) of thread {0} "}, + { "concurrency_util_header_reader_locks_owned_by_thread", "Summary of read locks acquired by thread {0} "}, + { "concurrency_util_summary_active_locks_on_thread_1", "Listing of all ACTIVE Locks.\n" + + "Thread Name: {0} \n"}, + { "concurrency_util_summary_active_locks_on_thread_2", "0 Active locks. The lockManager for this thread is null. \n\n"}, + { "concurrency_util_summary_active_locks_on_thread_3", "{0} Active locks.\n"}, + { "concurrency_util_summary_active_locks_on_thread_4", "Active lock nr: {0} , Active cache key: {1}\n\n"}, + { "concurrency_util_summary_deferred_locks_on_thread_1", "Listing of all DEFERRED Locks.\n" + + "Thread Name: {0} \n"}, + { "concurrency_util_summary_deferred_locks_on_thread_2", "0 deferred locks. The lockManager for this thread is null. \n\n"}, + { "concurrency_util_summary_deferred_locks_on_thread_3", "{0} Deferred locks.\n"}, + { "concurrency_util_summary_deferred_locks_on_thread_4", "Deferred lock nr: {0} , Deferred cache key: {1}\n\n"}, + { "concurrency_util_summary_read_locks_on_thread_step001_1", "Listing of all READ Locks. Step 001 - sparse summary loop over all read locks acquired:\n" + + "Thread Name: {0} \n"}, + { "concurrency_util_summary_read_locks_on_thread_step001_2", "0 read locks. The lockManager for this thread is null. \n\n"}, + { "concurrency_util_summary_read_locks_on_thread_step001_3", "{0} Read locks.\n"}, + { "concurrency_util_summary_read_locks_on_thread_step001_4", "Read lock nr: {0} , Read cache key: {1}\n\n"}, + { "concurrency_util_summary_read_locks_on_thread_step002_1", "\nListing of all READ Locks. 
Step 002 - fat-detailed information about all read locks acquired:"}, + { "concurrency_util_summary_read_locks_on_thread_step002_2", "Read locks acquired by thread: {0} with id: {1} never released.\n" + + "number of never released read locks: {2}"}, + { "concurrency_util_summary_read_locks_on_thread_step002_3", "\nRead lock nr: {0} Read lock Cache Key: {1}" + + "\nRead lock nr: {0} dateOfReadLockAcquisition: {2}" + + "\nRead lock nr: {0} numberOfReadersOnCacheKeyBeforeIncrementingByOne: {3}" + + "\nRead lock nr: {0} currentThreadStackTraceInformationCpuTimeCostMs: {4}"}, + { "concurrency_util_summary_read_locks_on_thread_step002_4", "\nRead lock nr: {0} stackTraceInformation: same as stack trace id: {1}"}, + { "concurrency_util_summary_read_locks_on_thread_step002_5", "\nRead lock nr: {0} Stack trace id: {1} Start" + + "\nRead lock nr: {0} stackTraceInformation: {2}" + + "\nRead lock nr: {0} Stack trace id: {1} End"}, + { "concurrency_util_summary_read_locks_on_thread_step002_6", "Read locks problems detected by thread: {0} during release read locks" + + "\n{1} removeReadLockProblemsDetected."}, + { "concurrency_util_summary_read_locks_on_thread_step002_7", "\nRelease read lock problem nr: {0} \n {1}"}, + { "concurrency_util_enrich_thread_dump", "enrichGenerateThreadDump: Failed to generate thread dump with error: {0} "}, + { "concurrency_util_enrich_thread_dump_thread_info_1", "\"{0}\" " + + "\n java.lang.Thread.State: {1}"}, + { "concurrency_util_enrich_thread_dump_thread_info_2", "\n at {0}"}, + { "concurrency_util_create_information_thread_dump", "Concurrency manager - Page 01 start - thread dump about all threads at time of event\n {0}" + + "\nConcurrency manager - Page 01 end - thread dump about all threads at time of event\n"}, + { "concurrency_util_create_information_all_threads_acquire_cache_keys_1", "Concurrency manager - Page 02 start - information about threads waiting to acquire (write/deferred) cache keys " + + "\nTotal number of threads waiting to acquire lock: {0}\n\n"}, + { "concurrency_util_create_information_all_threads_acquire_cache_keys_2", "[currentThreadNumber: {0}] [ThreadName: {1}]: Waiting to acquire (write/deferred): {2}\n"}, + { "concurrency_util_create_information_all_threads_acquire_cache_keys_3", "It seems, that trace was produced by the THREADS_TO_FAIL_TO_ACQUIRE_CACHE_KEYS - - org.eclipse.persistence.internal.helper.WriteLockManager.acquireRequiredLocks(MergeManager, UnitOfWorkChangeSet)"}, + { "concurrency_util_create_information_all_threads_acquire_cache_keys_4", "[methodNameThatGotStuckWaitingToAcquire: {0}] \n"}, + { "concurrency_util_create_information_all_threads_acquire_cache_keys_5", "Concurrency manager - Page 02 end - information about threads waiting to acquire (write/deferred) cache keys\n"}, + { "concurrency_util_create_information_all_threads_acquire_read_cache_keys_1", "Concurrency manager - Page 03 start - information about threads waiting to acquire read cache keys " + + "\nTotal number of threads waiting to acquire read locks: {0} \n\n"}, + { "concurrency_util_create_information_all_threads_acquire_read_cache_keys_2", "[currentThreadNumber: {0}] [ThreadName: {1} ]: Waiting to acquire (read lock): {2}\n"}, + { "concurrency_util_create_information_all_threads_acquire_read_cache_keys_3", "[methodNameThatGotStuckWaitingToAcquire: {0}] \n"}, + { "concurrency_util_create_information_all_threads_acquire_read_cache_keys_4", "Concurrency manager - Page 03 end - information about threads waiting to acquire read cache keys\n"}, + { 
"concurrency_util_create_information_all_threads_release_deferred_locks_1", "Concurrency manager - Page 04 start - information about threads waiting on release deferred locks (waiting for other thread to finish building the objects deferred) " + + "\nTotal number of threads waiting to acquire lock: {0} \n\n"}, + { "concurrency_util_create_information_all_threads_release_deferred_locks_2", "[currentThreadNumber: {0}] [ThreadName: {1} ]\n"}, + { "concurrency_util_create_information_all_threads_release_deferred_locks_3", "Concurrency manager - Page 04 end - information about threads waiting on release deferred locks (waiting for other thread to finish building the objects deferred)\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_1", "Concurrency manager - Page 05 start (currentThreadNumber: {0} of totalNumberOfThreads: {1}) - detailed information about specific thread " + + "\nThread: {2}" + + "\nThreadWaitingToReleaseDeferredLocks: {3}\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_2", " waitingOnAcquireWritingCacheKey: true waiting to acquire writing: {0}\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_3", " waitingOnAcquireWritingCacheKey: false\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_4", " waitingOnAcquireReadCacheKey: true waiting to acquire reading: {0}\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_5", " waitingOnAcquireReadCacheKey: false\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_6", " writeManagerThreadPrimaryKeysWithChangesToBeMerged: true" + + "\n writeManagerThreadPrimaryKeysWithChangesToBeMerged list: {0}\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_7", " writeManagerThreadPrimaryKeysWithChangesToBeMerged: false\n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_8", " waitingToReleaseDeferredLocksJustification: \n {0} \n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_9", " waitingToReleaseDeferredLocksJustification: information not available. \n"}, + { "concurrency_util_create_information_all_resources_acquired_deferred_10", "Concurrency manager - Page 05 end (currentThreadNumber: {0} of totalNumberOfThreads: {1}) - detailed information about specific thread\n"}, + { "concurrency_util_read_lock_manager_problem01", "Remove cache key from read lock manager problem 01:" + + "\n The current thread: {0} is about to decrement the currentNumberOfReaders from: {1} to decrementedNumberOfReaders {2} " + + "\n on the cache key: {3}" + + "\n however the readLockManager for the current thread is null." + + "\n This should never happen. If the readLockManager is null either the we have not incremented the number readers in the past. " + + "\n Or we have removed all the cache keys of the readLockManager belonging to this thread making the map become empty and be removed. " + + "\n It should never be the case that we are bout the decrement the count of readers and are not tracing the cache key are we about to decrement." + + "\n Stack trace as we detect the problem: {4}" + + "\n CurrentDate: {5}"}, + { "concurrency_util_read_lock_manager_problem02", "Remove cache key from read lock manager problem 02:" + + "\n removeReadLock PROBLEM Detected - mapThreadToReadLockAcquisitionMetadata does not have have threadId . " + + "\n The following problem is taking place. 
The Thread: {0} is about to decrease the number of readers on the cacheKey: {1} " + + "\n However this ReadLockManager currently has no entries on the mapThreadToReadLockAcquisitionMetadata associated to the ongoing threadId: {2}" + + "\n as result in terms of tracing we will simply ignore this call to removeReadLock for the current cache key. " + + "\n Problem is taking place in Thread: {2} " + + "\n Stack trace as we detect the problem: {3}" + + "\n CurrentDate: {4}"}, + { "concurrency_util_read_lock_manager_problem03", "Remove cache key from read lock manager problem 03:" + + "\n removeReadLock problem detected - mapThreadToReadLockAcquisitionMetadata.readLocksAcquiredDuringCurrentThread does not have have threadId . " + + "\n The following problem is taking place. The Thread: {0} is about to decrease the number of readers on the cacheKey: {1}" + + "\n However this ReadLockManager currently has no entries on the mapThreadToReadLockAcquisitionMetadata.readLocksAcquiredDuringCurrentThread associated to the ongoing threadId: {2}" + + "\n as result in terms of tracing we will simply ignore this call to removeReadLock for the current cache key. " + + "\n Problem is taking place in Thread: {2} " + + "\n Stack trace as we detect the problem: {3}" + + "\n CurrentDate: {4}"}, + { "concurrency_util_read_lock_acquisition_metadata", "No stack trace take. isAllowTakingStackTraceDuringReadLockAcquisition is false."}, + { "concurrency_util_dump_concurrency_manager_information_step02_01", "Start full concurrency manager state (massive) dump No: {0}\n"}, + { "concurrency_util_dump_concurrency_manager_information_step02_02", "End full concurrency manager state (massive) dump No: {0}\n"}, + { "concurrency_util_dump__dead_lock_explanation_01", "dumpDeadLockExplanationIfPossible code is buggy. The algorithm to try to explain dead locks should not break. Instead we have caught an exception"}, + { "concurrency_util_dump__dead_lock_explanation_02", "Concurrency manager - Page 07 start - dead lock explanation\n"}, + { "concurrency_util_dump__dead_lock_explanation_03", "We were not able to determine to determine a set of threads that went into dead lock."}, + { "concurrency_util_dump__dead_lock_explanation_04", "Dead lock result is comprised by {0} thread entries where one of them is repeated. \n"}, + { "concurrency_util_dump__dead_lock_explanation_05", "Dead lock participantNR: {0} " + + "\n {1} \n\n"}, + { "concurrency_util_dump__dead_lock_explanation_06", "deadlock algorithm took {0} ms to try to find deadlock." 
+ + "\nConcurrency manager - Page 07 end - dead lock explanation\n"}, + { "concurrency_util_cache_keys_threads_making_use_cache_key_01", "Concurrency manager - Page 06 start - information about cache keys and threads needing them " + + "\nTotal number of cacheKeys to describe: {0} \n\n"}, + { "concurrency_util_cache_keys_threads_making_use_cache_key_02", "-------------- [currentCacheKeyNumber: {0} of {1}]--------------\n"}, + { "concurrency_util_cache_keys_threads_making_use_cache_key_03", "[currentCacheKeyNumber: {0}] [CacheKey: {1} ]:" + + "\n[currentCacheKeyNumber: {0}] threadsThatAcquiredActiveLock: {2}" + + "\n[currentCacheKeyNumber: {0}] threadsThatAcquiredDeferredLock: {3}" + + "\n[currentCacheKeyNumber: {0}] threadsThatAcquiredReadLock: {4}" + + "\n[currentCacheKeyNumber: {0}] threadsKnownToBeStuckTryingToAcquireLock: {5}" + + "\n[currentCacheKeyNumber: {0}] threadsKnownToBeStuckTryingToAcquireLockForReading: {6}" + + "\n[currentCacheKeyNumber: {0}] threads doing object building with root on this cache key (not yet supported)...\n\n"}, + { "concurrency_util_cache_keys_threads_making_use_cache_key_04", "Concurrency manager - Page 06 end - information about cache keys and threads needing them\n"}, + { "explain_dead_lock_util_current_thread_blocked_active_thread_warning", "recursiveExplainPossibleDeadLockStep03Scenario03CurrentWriterVsCacheKeyActiveThread: nextCandidateThreadPartOfTheDeadLock is Thread: {0} " + + " This thread appears to be blocking the thread: {1} from progressing because it is set as the active thread on the cacheKey: {2} "}, + { "explain_dead_lock_util_thread_stuck_deferred_locks", "recursiveExplainPossibleDeadLockStep04ExpandBasedOnThreadStuckOnReleaseDeferredLocks: currentCandidateThreadPartOfTheDeadLock is Thread: {0} " + + " This thread appears to be stuck in the releaseDeferredLock logic. " + + " But our hacked implementation of the isBuildObjectOnThreadComplete was not able to explain what thread and cache key are recursively " + + " stopping the candidate thread to make progress... We expect this code spot to never be invoked. " + + " Either this thread made progress or if it continues to be stuck in the releaseDeferredLock " + + " we most likely have an implementation bug somewhere. 
"}, + { "XML_call", "XML call" }, + { "XML_data_call", "XML data call" }, + { "XML_data_delete", "XML data delete" }, + { "XML_data_insert", "XML data insert" }, + { "XML_data_read", "XML data read" }, + { "XML_data_update", "XML data update" }, + { "XML_delete", "XML delete" }, + { "XML_existence_check", "XML existence check" }, + { "XML_insert", "XML insert" }, + { "XML_read_all", "XML read all" }, + { "XML_read", "XML read" }, + { "XML_update", "XML update" }, + { "write_BLOB", "Writing BLOB value(size = {0} bytes) through the locator to the table field: {1}" }, + { "write_CLOB", "Writing CLOB value(size = {0} bytes) through the locator to the table field: {1}" }, + { "assign_sequence", "assign sequence to the object ({0} -> {1})" }, + { "assign_return_row", "Assign return row {0}" }, + { "compare_failed", "Compare failed: {0}:{1}:{2}" }, + { "added_unmapped_field_to_returning_policy", "Added unmapped field {0} to ReturningPolicy of {1}" }, + { "field_for_unsupported_mapping_returned", "Returned field {0} specified in ReturningPolicy of {1} mapped with unsupported mapping" }, + { "received_updates_from_remote_server", "Received updates from Remote Server" }, + { "received_remote_connection_from", "Received remote connection from {0}" }, + { "applying_changeset_from_remote_server", "Applying changeset from remote server {0}" }, + { "change_from_remote_server_older_than_current_version", "Change from Remote Server is older than current Version for {0}: {1}" }, + { "current_version_much_older_than_change_from_remote_server", "Current Version is much older than change from remote server for {0}: {1}" }, + { "Merging_from_remote_server", "Merging {0}: {1} from remote server" }, + { "initializing_local_discovery_communication_socket", "Initializing local discovery communication socket" }, + { "place_local_remote_session_dispatcher_into_naming_service", "Place local remote session dispatcher into naming service" }, + { "connecting_to_other_sessions", "connecting to other sessions" }, + { "done", "Done" }, + { "getting_local_initial_context", "Getting local initial context" }, + { "received_connection_from", "Received connection from {0}" }, + { "sending_changeset_to_network", "Sending changeSet to network" }, + { "failed_to_reconnect_remote_connection", "Failed to reconnect the remote connection on error" }, + { "dropping_connection", "Dropping connection: {0}" }, + { "attempting_to_reconnect_to_JMS_service", "Attempting to reconnect to JMS service" }, + { "retreived_remote_message_from_JMS_topic", "Retreived remote message from JMS topic: {0}" }, + { "retreived_remote_message_from_jgroup_channel", "Retreived remote message from JGroups channel: {0}" }, + { "processing_topLink_remote_command", "Processing TopLink remote command" }, + { "JMS_exception_thrown", "JMSException thrown" }, + { "announcement_sent_from", "Announcement sent from {0}" }, + { "announcement_received_from", "Announcement received from {0}" }, + { "reconnect_to_jms", "Reconnect to the JMS topic name {0}" }, + { "sequencing_connected", "sequencing connected, state is {0}" }, + { "sequencing_connected_several_states", "sequencing connected, several states are used" }, + { "sequence_without_state", "sequence {0}: preallocation size {1}" }, + { "sequence_with_state", "sequence {0}: preallocation size {1}, state {2}" }, + { "sequencing_disconnected", "sequencing disconnected" }, + { "sequencing_localPreallocation", "local sequencing preallocation for {0}: objects: {1} , first: {2}, last: {3}" }, + { 
"sequencing_afterTransactionCommitted", "local sequencing preallocation is copied to preallocation after transaction commit" }, + { "sequencing_afterTransactionRolledBack", "local sequencing preallocation is discarded after transaction roll back" }, + { "sequencing_preallocation", "sequencing preallocation for {0}: objects: {1} , first: {2}, last: {3}" }, + { "starting_rcm", "Starting Remote Command Manager {0}" }, + { "stopping_rcm", "Stopping Remote Command Manager {0}" }, + { "initializing_discovery_resources", "Initializing discovery resources - group={0} port={1}" }, + { "sending_announcement", "Sending service announcement..." }, + { "register_local_connection_in_jndi", "Registering local connection in JNDI under name {0}" }, + { "register_local_connection_in_registry", "Registering local connection in RMIRegistry under name {0}" }, + { "context_props_for_remote_lookup", "Remote context properties: {0}" }, + { "looking_up_remote_conn_in_jndi", "Looking up remote connection in JNDI under name {0} at URL {1}" }, + { "looking_up_remote_conn_in_registry", "Looking up remote connection in RMIRegistry at {0}" }, + { "unable_to_look_up_remote_conn_in_jndi", "Unable to look up remote connection in JNDI under name {0} at URL {1}" }, + { "unable_to_look_up_remote_conn_in_registry", "Unable to look up remote connection in RMIRegistry under name {0}" }, + { "converting_to_toplink_command", "Converting {0} to TopLink Command format" }, + { "converting_to_user_command", "Converting {0} from TopLink Command format to user format" }, + { "executing_merge_changeset", "Executing MergeChangeSet command from {0}" }, + { "received_remote_command", "Received remote command {0} from {1}" }, + { "processing_internal_command", "Executing internal RCM command {0} from {1}" }, + { "processing_remote_command", "Executing command {0} from {1}" }, + { "sync_propagation", "Propagating command synchronously" }, + { "async_propagation", "Propagating command asynchronously" }, + { "propagate_command_to", "Propagating command {0} to {1}" }, + { "discovery_manager_active", "RCM Discovery Manager active" }, + { "discovery_manager_stopped", "RCM Discovery Manager stopped" }, + { "announcement_sent", "RCM service announcement sent out to cluster" }, + { "announcement_received", "RCM service announcement received from {0}" }, + { "creating_session_broker", "Creating session broker: {0}" }, + { "creating_database_session", "Creating database session: {0}" }, + { "creating_server_session", "Creating server session: {0}" }, + { "EJB_create", "Create EJB ({0}) " }, + { "EJB_find_all", "Find all EJB objects ({0})" }, + { "EJB_find_all_by_name", "Find all EJB objects by named query ({0})" }, + { "EJB_find_one", "Find one EJB object ({0})" }, + { "EJB_find_one_by_name", "Find one EJB object by named query ({0})" }, + { "EJB_load", "Load EJB" }, + { "EJB_remove", "Remove EJB ({0})" }, + { "EJB_store", "Store EJB ({0})" }, + { "error_in_preInvoke", "Error in preInvoke." 
}, + { "unable_to_load_generated_subclass", "Unable to load generated subclass: {0}" }, + { "WebLogic_10_Platform_serverSpecificRegisterMBeans_enter", "WebLogic_10_Platform.serverSpecificRegisterMBeans enter" }, + { "WebLogic_10_Platform_serverSpecificRegisterMBeans_return", "WebLogic_10_Platform.serverSpecificRegisterMBeans return" }, + { "executeFinder_query", "executeFinder query: {0}, {1}" }, + { "executeFinder_finder_execution_results", "executeFinder - finder execution results: {0}" }, + { "PM_initialize_enter", "PersistenceManager.initialize enter for {0}" }, + { "PM_initialize_return", "PersistenceManager.initialize return for {0}" }, + { "PM_preDeploy_enter", "PersistenceManager.preDeploy enter for {0}" }, + { "PM_preDeploy_return", "PersistenceManager.preDeploy return for {0}" }, + { "PM_postDeploy_enter", "PersistenceManager.postDeploy enter for {0}" }, + { "PM_postDeploy_return", "PersistenceManager.postDeploy return for {0}" }, + { "createEJB_call", "createEJB call: {0}" }, + { "createEJB_return", "createEJB return: {0}" }, + { "removeEJB_call", "removeEJB call: {0}" }, + { "invokeHomeMethod_call", "invokeHomeMethod call: {0}({1})" }, + { "invokeHomeMethod_return", "invokeHomeMethod return" }, + { "ProjectDeployment_undeploy_enter", "ProjectDeployment.undeploy enter" }, + { "ProjectDeployment_undeploy_return", "ProjectDeployment.undeploy return" }, + { "ProjectDeployment_configureDescriptor_enter", "ProjectDeployment.configureDescriptor enter: {0}" }, + { "ProjectDeployment_configureDescriptor_return", "ProjectDeployment.configureDescriptor return" }, + { "ProjectDeployment_configureDescriptors_enter", "ProjectDeployment.configureDescriptors enter" }, + { "ProjectDeployment_configureDescriptors_return", "ProjectDeployment.configureDescriptors return" }, + { "configuring_descriptor", "configuring descriptor: {0}, {1}" }, + { "concrete_class", "concrete class: {0}" }, + { "setting_ref_class_of_foreign_ref_mapping", "setting ref class of foreign ref mapping: {0}, {1}" }, + { "setting_ref_class_of_aggregate_mapping", "setting ref class of aggregate mapping: {0}, {1}" }, + { "desc_has_inheritance_policy", "Descriptor has inheritance policy: {0}" }, + { "one_time_initialization_of_ProjectDeployment", "one-time initialization of ProjectDeployment" }, + { "generateBeanSubclass_call", "generateBeanSubclass call: {0}" }, + { "remote_and_local_homes", "remote and local homes: {0}, {1}" }, + { "generateBeanSubclass_return", "generateBeanSubclass return: {0}" }, + { "error_in_startBusinessCall", "Error in startBusinessCall." }, + { "error_in_endLocalTx", "Error in endLocalTx." }, + { "EJB20_Project_Deployment_adjustDescriptorsForUOW_enter", "UOWChangeSetFlagCodeGenerator.adjustDescriptorForUOWFlag enter" }, + { "EJB20_Codegeneration_For_UOW_Change_Policy_enter", "UOWChangePolicyCodeGenerator.generateCodeForUOWChangePolicy enter" }, + { "OBJECTCHANGEPOLICY_TURNED_ON", "Change tracking turned on for: {0}" }, + { "PM_DescriptorContents", "********** PersistenceManager.getPMDescriptorContents()" }, + { "project_class_used", "The project class [{0}] is being used." }, + { "pessimistic_lock_bean", "prepare pessimistic locking for bean {0}" }, + { "changetracker_interface_not_implemented", "Class [{0}] for attribute [{1}] does not implement ChangeTracker interface. This class is being reverted to DeferredChangeDetectionPolicy." 
}, + { "changetracker_interface_not_implemented_non_cmp", "Class [{0}] is being reverted to DeferredChangeDetectionPolicy since the attribute [{1}] " + "is a non-cmp field but the owinging class does not implement ChangeTracker interface." }, + { "acquire_client_session_broker", "acquire client session broker" }, + { "releasing_client_session_broker", "releasing client session broker" }, + { "client_released", "client released" }, + { "client_acquired", "client acquired: {0}" }, + { "tracking_pl_object", "track pessimistic locked object {0} with UnitOfWork {1}" }, + { "instantiate_pl_relationship", "instantiate pessimistic locking relationship when relationship is accessed in a new transaction." }, + { "descriptor_xml_not_in_jar", "The descriptor file ({0}) is not found in jar({1}) file, no migration therefore will be performed for this jar." }, + { "pessimistic_locking_migrated", "The native CMP setting 'pessimistic-locking' on entity({0}) has been migrated and supported." }, + { "read_only_migrated", "The native CMP setting 'read-only' on entity({0}) has been migrated and supported." }, + { "call_timeout_migrated", "Oc4j native CMP setting 'time-out' on entity({0}) has been migrated and supported." }, + { "verifiy_columns_version_locking_migrated", "Optimistic setting 'Version' on 'verifiy-columns' in entity ({0}) has been migrated." }, + { "verifiy_columns_timestamp_locking_migrated", "Optimistic setting 'Timestamp' on 'verifiy-columns' in entity ({0}) has been migrated." }, + { "verifiy_columns_changedField_locking_migrated", "Optimistic setting 'Modify' on 'verifiy-columns' in entity ({0}) has been migrated." }, + { "order_database_operations_supported", "WLS native CMP setting 'order-database-operations' has been supported and migrated" }, + { "pattern_syntax_error", "Regular expression syntax error, exception is: {0}" }, + { "weaver_user_impl_change_tracking", "Weaving for change tracking not required for class [{0}] because it already implements the ChangeTracker interface."}, + { "weaver_found_field_lock", "Weaving for change tracking not enabled for class [{0}] because it uses field-based optimisitic locking."}, + { "weaver_processing_class", "Class [{0}] registered to be processed by weaver."}, + { "begin_weaving_class", "Begin weaver class transformer processing class [{0}]."}, + { "end_weaving_class", "End weaver class transformer processing class [{0}]."}, + { "transform_missing_class_details", "Missing class details for [{0}]."}, + { "transform_existing_class_bytes", "Using existing class bytes for [{0}]."}, + { "weaved_lazy", "Weaved lazy (ValueHolder indirection) [{0}]."}, + { "weaved_fetchgroups", "Weaved fetch groups (FetchGroupTracker) [{0}]."}, + { "weaved_changetracker", "Weaved change tracking (ChangeTracker) [{0}]."}, + { "weaved_persistenceentity", "Weaved persistence (PersistenceEntity) [{0}]."}, + { "weaved_rest", "Weaved REST [{0}]."}, + { "cmp_init_invoke_predeploy", "JavaSECMPInitializer - predeploying {0}."}, + { "cmp_init_register_transformer", "JavaSECMPInitializer - registering transformer for {0}."}, + { "cmp_init_tempLoader_created", "JavaSECMPInitializer - created temporary ClassLoader: {0}."}, + { "cmp_init_shouldOverrideLoadClassForCollectionMembers", "JavaSECMPInitializer - override load class for collection members: {0}."}, + { "cmp_loading_entities_using_loader", "JavaSECMPInitializer - loading entities using ClassLoader: {0}."}, + { "cmp_init_transformer_is_null", "JavaSECMPInitializer - transformer is null."}, + { 
"cmp_init_globalInstrumentation_is_null", "JavaSECMPInitializer - global instrumentation is null."}, + { "cmp_init_invoke_deploy", "JavaSECMPInitializer - deploying {0}."}, + { "cmp_init_completed_deploy", "JavaSECMPInitializer - completed deploy of {0}."}, + { "cmp_init_initialize", "JavaSECMPInitializer - initializing {0}."}, + { "cmp_init_initialize_from_main", "JavaSECMPInitializer - initializing from main."}, + { "cmp_init_initialize_from_agent", "JavaSECMPInitializer - initializing from agent."}, + { "validation_factory_not_initialized", "Bean Validation Factory was not initialized: [{0}]."}, + { "searching_for_default_mapping_file", "Searching for mapping file: [{0}] at root URL: [{1}]."}, + { "found_default_mapping_file", "Found mapping file: [{0}] at root URL: [{1}]."}, + + { "detect_server_platform","Detected server platform: {0}."}, + { "configured_server_platform", "Configured server platform: {0}"}, + { "dbPlatformHelper_detectedVendorPlatform", "Detected database platform: {0}"}, + { "dbPlatformHelper_regExprDbPlatform", "Database platform: {1}, regular expression: {0}"}, + { "dbPlatformHelper_patternSyntaxException", "Exception while using regular expression: {0}" }, + { "platform_ora_init_id_seq", "Init Oracle identity sequence {0} -> {1} for table {2}"}, + { "platform_ora_remove_id_seq", "Remove Oracle identity sequence {0} -> {1} for table {2}"}, + { "unknown_query_hint", "query {0}: unknown query hint {1} will be ignored"}, + { "query_hint", "query {0}: query hint {1}; value {2}"}, + { "property_value_specified", "property={0}; value={1}"}, + { "property_value_default", "property={0}; default value={1}"}, + { "handler_property_value_specified", "property={0}; value={1}; translated value={2}"}, + { "handler_property_value_default", "property={0}; default value={1}; translated value={2}"}, + { "predeploy_begin", "Begin predeploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "predeploy_end", "End predeploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "session_name_change", "Session change name: Persistence Unit {0}; old session {1}; new session {2}"}, + { "deploy_begin", "Begin deploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "deploy_end", "End deploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "undeploy_begin", "Begin undeploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "undeploy_end", "End undeploying Persistence Unit {0}; session {1}; state {2}; factoryCount {3}"}, + { "composite_member_begin_call", "Begin {0} on composite member Persistence Unit {1}; state {2}"}, + { "composite_member_end_call", "End {0} on composite member Persistence Unit {1}; state {2}"}, + { "loading_session_xml", "Loading persistence unit from sessions-xml file: {0}, session-name: {1}"}, + + { "default_tables_created", "The table ({0}) is created."}, + { "cannot_create_table", "The table ({0}) could not be created due to exception: {1}" }, + { "cannot_add_field_to_table", "The field ({0}) could not be added to the table ({1}) due to exception: {2}" }, + { "identity_map_does_not_exist", "Identity Map [{0}] does not exist" }, + { "identity_map_is_empty", "Identity Map [{0}] is empty" }, + { "key_value", "Key [{0}] => Value [{1}]" }, + { "no_identity_maps_in_this_session", "There are no Identity Maps in this session" }, + { "identity_map_class", "Identity Map [{0}] class = {1}" }, + { "identity_map_initialized", "Identity Map [{0}] is initialized" }, + { 
"identity_map_invalidated", "Identity Map [{0}] is invalidated" }, + { "no_classes_in_session", "No Classes in session." }, + + { "creating_broadcast_connection", "{0}: creating connection." }, + { "broadcast_connection_created", "{0}: connection created." }, + { "failed_to_create_broadcast_connection", "{0}: failed to create connection." }, + { "broadcast_sending_message", "{0}: sending message {1}" }, + { "broadcast_sent_message", "{0}: has sent message {1}" }, + { "broadcast_closing_connection", "{0}: connection is closing." }, + { "broadcast_connection_closed", "{0}: connection closed." }, + { "broadcast_retreived_message", "{0}: has received message {1}" }, + { "broadcast_processing_remote_command", "{0}: processing message {1} sent by service id {2}: processing remote command {3}." }, + { "broadcast_connection_start_listening", "{0}: Start listening." }, + { "broadcast_connection_stop_listening", "{0}: Stop listening." }, + { "sdo_type_generation_processing_type", "{0}: Generating Type [{1}]."}, + { "sdo_type_generation_processing_type_as", "{0}: Generating Type [{1}] as [{2}]."}, + { "registered_mbean", "Registered MBean: {0} on server {1}" }, + { "unregistering_mbean", "Unregistering MBean: {0} on server {1}" }, + { "mbean_get_application_name", "The applicationName for the MBean attached to session [{0}] is [{1}]" }, + { "mbean_get_module_name", "The moduleName for the MBean attached to session [{0}] is [{1}]" }, + { "active_thread_is_different_from_current_thread", "Forcing the activeThread \"{0}\" on the mergeManager \"{1}\" to be the currentThread \"{2}\" because they are different." }, + { "dead_lock_encountered_on_write_no_cachekey", "Potential deadlock encountered while thread: {2} attempted to lock object of class: {0} with id: {1}, entering deadlock avoidance algorithm. This is a notice only."}, + { "metamodel_attribute_class_type_is_null", "Metamodel processing: The class type is null for the attribute: {0}." }, + { "metamodel_mapping_type_is_unsupported", "Metamodel processing: The mapping type [{0}] in the attribute [{1}] is currently unsupported." }, + { "metamodel_descriptor_type_eis_or_xml_is_unsupported", "Metamodel processing: EIS or XML ClassDescriptor instances [{0}] are currently not supported." }, + { "connect_drivermanager_fail", "DriverManager connect failed, trying direct connect."}, + { "metamodel_unable_to_determine_element_type_in_absence_of_generic_parameters", "Metamodel processing: Unable to get the element type for the mapping [{0}] in the absence of generic parameters on mapping declaration." }, + { "metamodel_init_failed", "Initialization of the metamodel failed during deployment. Ignoring exception: [{0}] " }, + { "metamodel_canonical_model_class_not_found", "Canonical Metamodel class [{0}] not found during initialization."}, + { "metamodel_canonical_model_class_found", "Canonical Metamodel class [{0}] found and instantiated during initialization."}, + { "metamodel_relationaldescriptor_javaclass_null_on_managedType", "Metamodel processing: The javaClass field is null for the relationalDescriptor [{0}] for the managedType [{1}]." }, + { "metamodel_attribute_getmember_is_null", "The java Member is null for the Attribute [{0}] with managedType [{1}] and descriptor [{2}]." }, + { "metamodel_typeImpl_javaClass_should_not_be_null", "The metamodel TypeImpl.javaClass field should not be set to null for the Type [{0}] with name [{1}]." 
}, + { "jmx_mbean_classloader_in_use", "EclipseLink JMX Runtime Services is referencing the [{0}] ClassLoader at: [{1}]" }, + { "metamodel_itentifiableType_javaclass_null_cannot_set_supertype", "Metamodel processing: Unable to set the superclass Hierarchy because the javaClass field is null for the relationalDescriptor [{0}] for the identifiableType [{1}]." }, + { "metamodel_relationaldescriptor_not_fully_initialized_yet", "Metamodel processing: The relationalDescriptor [{0}] for the managedType [{1}] is not fully initialized yet - the Metamodel instance will be incomplete before at least one entityManger session login (after a full deploy)." }, + { "no_weaved_vh_method_found_verify_weaving_and_module_order", "An expected weaving method [{0}] was not found on the accessor [{2}] on the mapping [{1}] - verify that the processing order of your modules places the one containing a persistence unit ahead of modules that use it in your deployment descriptor, or disable weaving for the persistence context or the mapping using FetchType.EAGER." }, + { "proxy_connection_customizer_already_proxy_session", "{0}:{1}: proxy session with unknown properties is already opened. Closing it."}, + { "proxy_connection_customizer_opened_proxy_session", "{0}:{1}: opened proxy session."}, + { "proxy_connection_customizer_closing_proxy_session", "{0}:{1}: closing proxy session."}, + { "releasing_invalid_lock", "A lock has been encountered where the thread: {0} is no longer active. The lock on object class :{1} id: {2} has been forcibly released"}, + { "acquire_connection", "Connection acquired from connection pool [{0}]." }, + { "release_connection", "Connection released to connection pool [{0}]." }, + { "failover", "Connection pool [{0}] is dead, failing over to poll [{1}]." }, + { "max_time_exceeded_for_acquirerequiredlocks_wait", "MAX TIME {0} seconds EXCEEDED FOR WRITELOCKMANAGER WAIT. Waiting on Entity type: {1}with pk: {2} currently locked by thread: {3} with the following trace:\n"}, + { "dcn_registering", "Registering for database change event notification." }, + { "dcn_change_event", "Receieved database change event [{0}]." }, + { "dcn_invalidate", "Invalidating cache key [{0}] from database change event for class [{1}]." }, + { "dcn_register_table", "Registering table [{0}] for database change event notification." }, + { "dcn_unregister", "Removing registering for database change event notification." }, + + { "exception_caught_closing_statement", "Exception caught when trying to close the query statement [{0}]." }, + //MOXy fine/finer/finest + { "schema_factory", "SchemaFactory instance: {0}" }, + { "saxparser_factory", "SAXParserFactory instance: {0}" }, + { "xpath_factory", "XPathFactory instance: {0}" }, + { "transformer_factory", "TransformerFactory instance: {0}" }, + { "documentbuilder_factory", "DocumentBuilderFactory instance: {0}" }, + { "jaxp_sec_disabled", "Xml Security disabled, no JAXP {0} external access configuration necessary." }, + { "jaxp_sec_explicit", "Detected explicitly JAXP configuration, no JAXP {0} external access configuration necessary." }, + { "jaxp_sec_prop_supported", "Property {0} is supported and has been successfully set by used JAXP implementation." }, + { "jaxp_sec_prop_not_supported", "Property {0} is not supported by used JAXP implementation." 
}, + { "moxy_start_marshalling", "Marshalling \"{0}\" into {1} started"}, + { "moxy_start_unmarshalling", "Unmarshalling {0} into \"{1}\" by \"{2}\" started"}, + { "moxy_read_from_moxy_json_provider", "MOXyJsonProvider.readFrom(...) is called."}, + { "moxy_write_to_moxy_json_provider", "MOXyJsonProvider.writeTo(...) is called."}, + { "moxy_set_marshaller_property", "Setting marshaller property (name/value): {0}/{1}"}, + { "moxy_set_unmarshaller_property", "Setting unmarshaller property (name/value): {0}/{1}"}, + { "moxy_set_jaxb_context_property", "Setting JAXBContext property (name/value): {0}/{1}"}, + + { "invalid_tzone", "Invalid timezone conversion property {0} value: {1}. Will attempt to resolve default." }, + { "invalid_default_tzone", "Invalid timezone conversion property {0} value: {1}. Defaulting to UTC." }, + { "using_conversion_tzone", "ConversionManager using default zone offset: {1}." } + }; + + /** + * Return the lookup table. + */ + protected Object[][] getContents() { + return contents; + } +} diff --git a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/sessions/AbstractSession.java b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/sessions/AbstractSession.java index cd930922b4f..cec18b9dc7d 100644 --- a/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/sessions/AbstractSession.java +++ b/foundation/org.eclipse.persistence.core/src/org/eclipse/persistence/internal/sessions/AbstractSession.java @@ -50,6 +50,7 @@ import java.util.Map; import java.util.Set; import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import org.eclipse.persistence.config.PersistenceUnitProperties; @@ -77,8 +78,11 @@ import org.eclipse.persistence.internal.databaseaccess.Platform; import org.eclipse.persistence.internal.descriptors.ObjectBuilder; import org.eclipse.persistence.internal.helper.ConcurrencyManager; +import org.eclipse.persistence.internal.helper.ConcurrencyUtil; +import org.eclipse.persistence.internal.helper.DeferredLockManager; import org.eclipse.persistence.internal.helper.Helper; import org.eclipse.persistence.internal.helper.QueryCounter; +import org.eclipse.persistence.internal.helper.ReadLockManager; import org.eclipse.persistence.internal.helper.linkedlist.ExposedNodeLinkedList; import org.eclipse.persistence.internal.history.HistoricalSession; import org.eclipse.persistence.internal.identitymaps.CacheKey; @@ -164,6 +168,36 @@ * @see DatabaseSessionImpl */ public abstract class AbstractSession extends CoreAbstractSession implements org.eclipse.persistence.sessions.Session, CommandProcessor, Serializable, Cloneable { + + /** + * This flag we use if the abstract session is stuck trying to merge the change set to the main server cache + * is stuck trying to get a cache key with object different than null set on the cache key. + * + * We are more than willing to allow an interrupted exception to be fired up + * because we have seen the method playing a role as protagonist in deadlocks + * and since the metho can set the cache key to invalid we are more than happy to get an interrupt exception + * it that has any chance to out of the dead lock. + */ + private static final Boolean ALLOW_INTERRUPTED_EXCEPTION_TO_BE_FIRED_UP_TRUE = true; + + + /** + * See the issue:
+ * https://github.com/eclipse-ee4j/eclipselink/issues/2094. + * + * These are threads involved in "getCacheKeyFromTargetSessionForMerge" that detect that the cache key + * somehow still has its Object set to null. + * If the cache key is acquired by a different thread, the current thread will be waiting and hoping for that cache key + * to eventually stop being acquired. + * + * But this process is highly risky, as the thread doing the wait might itself own resources needed by the thread + * that is currently the owner of the cache key it desires. + */ + private static final Map THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED = new ConcurrentHashMap<>(); + + + + /** ExceptionHandler handles database exceptions. */ transient protected ExceptionHandler exceptionHandler; @@ -2835,8 +2869,331 @@ public int getNumberOfActiveUnitsOfWork() { * For use within the merge process this method will get an object from the shared * cache using a readlock. If a readlock is unavailable then the merge manager will be * transitioned to deferred locks and a deferred lock will be used. + * + *

    2024.04.27: This is a method we might see being used in the commit phase of a transaction. + * Take note that the snippet of stack trace we provide below does not necessarily match the current lines of code, + * but it is good enough to give clarity about the context in which we might see this method in use. + *

    +     * [ACTIVE] ExecuteThread: '164' for queue: 'weblogic.kernel.Default (self-tuning)'"
    +   java.lang.Thread.State: WAITING
    +        at java.base@11.0.13/java.lang.Object.wait(Native Method)
    +        at java.base@11.0.13/java.lang.Object.wait(Object.java:328)
    +        at org.eclipse.persistence.internal.sessions.AbstractSession.getCacheKeyFromTargetSessionForMerge(AbstractSession.java:2858) <--- This is the point we are attacking this code could get stuck forever.
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:782)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:716)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChanges(MergeManager.java:320)
    +        at org.eclipse.persistence.mappings.ObjectReferenceMapping.mergeIntoObject(ObjectReferenceMapping.java:524)
    +        at org.eclipse.persistence.internal.descriptors.ObjectBuilder.mergeIntoObject(ObjectBuilder.java:4202)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:825)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:716)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChanges(MergeManager.java:320)
    +        at org.eclipse.persistence.mappings.CollectionMapping.mergeIntoObject(CollectionMapping.java:1643)
    +        at org.eclipse.persistence.internal.descriptors.ObjectBuilder.mergeIntoObject(ObjectBuilder.java:4202)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:847)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:716)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChanges(MergeManager.java:320)
    +        at org.eclipse.persistence.mappings.CollectionMapping.mergeIntoObject(CollectionMapping.java:1643)
    +        at org.eclipse.persistence.internal.descriptors.ObjectBuilder.mergeIntoObject(ObjectBuilder.java:4202)
    +        at org.eclipse.persistence.internal.descriptors.ObjectBuilder.mergeChangesIntoObject(ObjectBuilder.java:4135)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:854)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChangesOfWorkingCopyIntoOriginal(MergeManager.java:716)
    +        at org.eclipse.persistence.internal.sessions.MergeManager.mergeChanges(MergeManager.java:320)
    +        at org.eclipse.persistence.internal.sessions.UnitOfWorkImpl.mergeChangesIntoParent(UnitOfWorkImpl.java:3394)
    +        at org.eclipse.persistence.internal.sessions.RepeatableWriteUnitOfWork.mergeChangesIntoParent(RepeatableWriteUnitOfWork.java:382)
    +        at org.eclipse.persistence.internal.sessions.UnitOfWorkImpl.mergeClonesAfterCompletion(UnitOfWorkImpl.java:3527)
    +        at org.eclipse.persistence.transaction.AbstractSynchronizationListener.afterCompletion(AbstractSynchronizationListener.java:219)
    +        at org.eclipse.persistence.transaction.JTASynchronizationListener.afterCompletion(JTASynchronizationListener.java:81)
    +        at weblogic.transaction.internal.ServerSCInfo.doAfterCompletion(ServerSCInfo.java:1145)
    +        at weblogic.transaction.internal.ServerSCInfo.callAfterCompletionsForTier(ServerSCInfo.java:1121)
    +        at weblogic.transaction.internal.ServerSCInfo.callAfterCompletions(ServerSCInfo.java:1093)
    +        at weblogic.transaction.internal.ServerTransactionImpl.callAfterCompletions(ServerTransactionImpl.java:3773)
    +        at weblogic.transaction.internal.ServerTransactionImpl.afterCommittedStateHousekeeping(ServerTransactionImpl.java:3662)
    +        at weblogic.transaction.internal.ServerTransactionImpl.setCommitted(ServerTransactionImpl.java:3716)
    +        at weblogic.transaction.internal.ServerTransactionImpl.globalRetryCommit(ServerTransactionImpl.java:3432)
    +        at weblogic.transaction.internal.ServerTransactionImpl.globalCommit(ServerTransactionImpl.java:3327)
    +        at weblogic.transaction.internal.ServerTransactionImpl.internalCommit(ServerTransactionImpl.java:322)
    +        at weblogic.transaction.internal.ServerTransactionImpl.commit(ServerTransactionImpl.java:270)
    +        at weblogic.ejb.container.internal.MDListener.execute(MDListener.java:465)
    +        at weblogic.ejb.container.internal.MDListener.transactionalOnMessage(MDListener.java:361)
    +        at weblogic.ejb.container.internal.MDListener.onMessage(MDListener.java:297)
    +        at weblogic.jms.client.JMSSession.onMessage(JMSSession.java:5141)
    +        at weblogic.jms.client.JMSSession.execute(JMSSession.java:4795)
    +        at weblogic.jms.client.JMSSession.executeMessage(JMSSession.java:4190)
    +        at weblogic.jms.client.JMSSession.access$000(JMSSession.java:132)
    +        at weblogic.jms.client.JMSSession$UseForRunnable.run(JMSSession.java:5666)
    +        at weblogic.work.SelfTuningWorkManagerImpl$WorkAdapterImpl.run(SelfTuningWorkManagerImpl.java:677)
    +     * 
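+     * <p>
+     * Illustrative sketch only: the legacy behavior of this method can be restored through the persistence unit
+     * property {@code PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION}
+     * introduced by this change. The property value and the persistence unit name shown here are placeholders rather
+     * than guaranteed values; the accepted modes are defined by ConcurrencyUtil.
+     * <pre>{@code
+     * Map<String, Object> props = new HashMap<>();
+     * props.put(PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION,
+     *         "<mode-of-operation>"); // placeholder value, e.g. the backwards-compatibility mode
+     * EntityManagerFactory emf = Persistence.createEntityManagerFactory("my-pu", props);
+     * }</pre>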
+ * + * + * If we look at the {@code at org.eclipse.persistence.transaction.JTASynchronizationListener.afterCompletion(JTASynchronizationListener.java:81)} + * we see that the javadoc tells us that + * {@code Called by the JTA transaction manager after the transaction is committed + * or rolled back. This method executes without a transaction context.} + * + * That is, the DB is already up to date. + * And now the EclipseLink cache must move forward and update the server session cache to match the changes of the merge manager. + */ + protected CacheKey getCacheKeyFromTargetSessionForMerge(Object implementation, ObjectBuilder builder, ClassDescriptor descriptor, MergeManager mergeManager) { + // (backwards-compatibility-mode) + // Check the ConcurrencyUtil configuration: + // if it explicitly says we want to run the original code (which is not a smart idea, + // since we will likely face the bug https://github.com/eclipse-ee4j/eclipselink/issues/2094), + // then we still have this possibility + if(ConcurrencyUtil.SINGLETON.isAbstractSessionModeOfOperationOfMergeManagerGetCacheKeyBackwardsCompatibility()) { + return getCacheKeyFromTargetSessionForMergeDeprecated(implementation, builder, descriptor, mergeManager); + } + + // (a) Initiate the hunt for the cache key + // if the object does not yet have a primary key we can get out of this method + // note: assuming this is because the object is a brand new object + Object original = null; + Object primaryKey = builder.extractPrimaryKeyFromObject(implementation, this, true); + if (primaryKey == null) { + return null; + } + + // (b) scenario of getting a cache key where a merge manager is not involved in the process + // note: this step is somewhat strange because if the cache key is not null + // then the code will do this step called checkReadLock that acquires and releases the cache key for reading, + // somewhat like testing if you can start up a car by turning the key but then immediately shutting the car down. + // It is not obvious what the added value of this checkReadLock actually is; expert knowledge is needed here + CacheKey cacheKey = null; + if (mergeManager == null){ + cacheKey = getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, implementation.getClass(), descriptor, true); + if (cacheKey != null){ + cacheKey.checkReadLock(); + } + return cacheKey; + } + + // (c) We are trying to acquire the key as part of some merge process. + // Here approach number one is trying to acquire a read lock without being willing to wait at all, + // so we see the call to acquireReadLockNoWait. + // The strange thing is that the method then simply calls cacheKey.getObject(), + // does not check whether the object is null or not, and essentially returns. + // Otherwise, the code here activates a deferred lock approach + // by calling upon acquireDeferredLock(). + // When the thread comes out of acquireDeferredLock() it either is the active thread on the cache key + // or it is just another thread that indicates that it deferred on the cache key. + // This thread then wants to somehow make sure that the cache key has its object field + // evolving to a non-null value, or at least this is what it looks like. + // NOTE: The section of code below, where we see the check "is the original on the cacheKey null?" and, if it is, the code goes + // into an eternal wait, is simply super dangerous: + // https://github.com/eclipse-ee4j/eclipselink/issues/2094 + // we have seen threads eternally blocked there.
+ // What was happening here was that the thread going via the merge manager was owning several CacheKeys for writing. + // The thread A stuck on this area of code was owning a total of 56 cache keys; + // we then had another thread B that was politely doing a + // ConcurrencyManager.releaseDeferredLock(ConcurrencyManager.java:654) + // this thread B was the owner of the cache key that thread A was wanting to merge changes into + // thread B was doomed to never finish object building because thread A was stuck here in an eternal loop + // waiting for the cache key to be freed up + + cacheKey = getIdentityMapAccessorInstance().getCacheKeyForObject(primaryKey, implementation.getClass(), descriptor, true); + if (cacheKey != null) { + if (cacheKey.acquireReadLockNoWait()) { + original = cacheKey.getObject(); + cacheKey.releaseReadLock(); + } else { + if (!mergeManager.isTransitionedToDeferredLocks()) { + getIdentityMapAccessorInstance().getWriteLockManager().transitionToDeferredLocks(mergeManager); + } + cacheKey.acquireDeferredLock(); + try { + + getCacheKeyFromTargetSessionForMergeScenarioMergeManagerNotNullAndCacheKeyOriginalStillNull(cacheKey, implementation, builder, descriptor, mergeManager); + + } finally { + cacheKey.releaseDeferredLock(); + } + + + } + } + return cacheKey; + } + + /** + * This method is a sub-step of {@link org.eclipse.persistence.internal.sessions.AbstractSession#getCacheKeyFromTargetSessionForMerge(Object, ObjectBuilder, ClassDescriptor, MergeManager)}. + * In this case we are dealing with a scenario where the mergeManager parameter is known to be not null and the cache key is known to be not null, + * however the CacheKey object is still found to be null. + * The original implementation wanted to wait for a signal that the cache key has been released and is no longer acquired, presumably because + * that should technically imply that the previous owner of the cache key has finished building the object + * and has now set it on the cache key. That is what the old code seemed to imply. + * + * The old implementation was buggy, potentially leading to an eternal wait and creating a deadlock where the thread involved in the deadlock + * is not aware that it is in the middle of a deadlock, causing problems. + * + * @param cacheKey + * guaranteed to no longer be null + * + * @param implementation + * we assume this is the CLONED object that has been modified and should eventually be merged to the server session cache. + * @param builder + * this is the object builder + * @param descriptor + * metadata about the entity associated to the present object + * @param mergeManager + * the merge manager that wants to grab the cache key for the current object; it appears to be the case + * that it is hoping for the cache key to ideally have an object set on it. + * @return a non-null cache key. If the wait call is interrupted, or the concurrency util decides + * that it is time to kill the deadlocked thread, the cache key is flagged as invalid before being returned.
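+     *
+     * <p>
+     * A simplified sketch of the wait loop implemented below (illustrative only: names are shortened and error
+     * handling is omitted, the method body itself is the authoritative logic):
+     * <pre>{@code
+     * synchronized (cacheKey) {
+     *     setJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired(reason);
+     *     try {
+     *         while (ownedByAnotherWriter && cacheKey.getObject() == null && !believedDeadlocked) {
+     *             cacheKey.wait(ConcurrencyUtil.SINGLETON.getAcquireWaitTime()); // timed wait, never eternal
+     *             believedDeadlocked = ConcurrencyUtil.SINGLETON.determineIfReleaseDeferredLockAppearsToBeDeadLocked(
+     *                     cacheKey, whileStartTimeMillis, lockManager, readLockManager, true);
+     *             ownedByAnotherWriter = cacheKey.isAcquiredForWritingAndOwneddByADifferentThread();
+     *         }
+     *     } finally {
+     *         if (believedDeadlocked) {
+     *             cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID); // let the shared cache self-heal
+     *         }
+     *         clearJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired();
+     *     }
+     * }
+     * }</pre>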
+ */ + @SuppressWarnings("java:S2445") + protected CacheKey getCacheKeyFromTargetSessionForMergeScenarioMergeManagerNotNullAndCacheKeyOriginalStillNull( + final CacheKey cacheKey, final Object implementation, final ObjectBuilder builder, + final ClassDescriptor descriptor, final MergeManager mergeManager) { + // (a) basic pre-conditions to call this method + // we know exactly from where we call this method and we know that these assumptions are true 100% of the time + assert cacheKey != null; + assert mergeManager != null; + + // (b) basic variable initialization + final Thread currentThread = Thread.currentThread(); + final String currentThreadName = currentThread.getName(); + final long whileStartTimeMillis = System.currentTimeMillis(); + final DeferredLockManager lockManager = cacheKey.getDeferredLockManager(currentThread); + final ReadLockManager readLockManager = cacheKey.getReadLockManager(currentThread); + Object original = cacheKey.getObject(); + boolean originalIsStillNull = original == null; + boolean weBelieveToBeStuckInADeadlock = false; + + // (c) This method is a new implementation of the old deprecated code + // around the topic of the problematic eternal wait on a cache key. + // That original code would never go into the eternal wait if cacheKey.getObject() was not null, + // so that is what we are doing in this if: the cache key already has an object set, we can just get out of here + if (!originalIsStillNull) { + return cacheKey; + } + + // (d) At this point we really enter the dangerous territory of the defect + // https://github.com/eclipse-ee4j/eclipselink/issues/2094 + // this is where the old code would just start an eternal wait and hope to one day be notified on the + // cache key, sending it back to work and out of the method. + // That old approach is not good because we might be living through a deadlock right now, + // so if we are in a deadlock the last thing we need is an eternal wait: + // we hope to get out of the deadlock by returning out of this method and making the cache key invalid + synchronized (cacheKey) { + // (e) the old code would do cacheKey.isAcquired(); + // we have enhanced the concurrency manager with a better API, + // isAcquiredForWritingAndOwneddByADifferentThread, because + // it is possible that the cache key we are dealing with was acquired by ourselves in the step above + boolean someOtherThreadCurrentlyOwningTheCacheKey = cacheKey + .isAcquiredForWritingAndOwneddByADifferentThread(); + + // (f) if we look at the old code we would see it checking in the synchronized block whether + // cacheKey.isAcquired(); if not, the method would just end. That is what we are doing here + if (!someOtherThreadCurrentlyOwningTheCacheKey) { + return cacheKey; + } + + // (g) At this point we are really getting into dangerous territory: + // prepare a justification string to explain why our thread is stuck + final String cacheKeyToStringOwnedByADifferentThread = ConcurrencyUtil.SINGLETON + .createToStringExplainingOwnedCacheKey(cacheKey); + String justificationAsToWhyWeAreStuckHere = String.format( + " Merge manager logic is currently stuck in the process of trying to return the cache key: %1$s " + + " this cache key is currently acquired by a competing thread: %2$s . " + + " This cache key has the problem that its original object is still set to NULL. " + + " The hope of this current thread is that by waiting for some time the current owner of the cache key will finish object building and release the cache key. " + + " Note: There is real risk that we are in a deadlock. The deadlock exists if the current thread: %3$s " + + " is owning other cache key resources as a writer. Any lock acquired by the current thread might be needed by competing threads. " + + " Much in the same manner that our current thread is stuck hoping to see this specific cache key being released. ", + cacheKeyToStringOwnedByADifferentThread, cacheKey.getActiveThread(), currentThreadName); + + // (h) let us start our wait loop, hoping for the competing thread (e.g. most likely working on object + // building) to finish its activity + try { + setJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired( + justificationAsToWhyWeAreStuckHere); + while ( + // (i) as soon as the cache key is not owned by anybody we can safely leave. + // This is based on the fact that the original deprecated code would only be doing its eternal wait on the cache key + // if the key had been flagged as acquired. If the key was not flagged as acquired it would not + // enter that eternal wait + someOtherThreadCurrentlyOwningTheCacheKey && + + // (ii) for us to enter this loop and stay on this loop we do it only if + // the cache key has the object set to null + // again this is because the original deprecated code would only be doing its eternal wait + // if and only if the cache key object was found to be null + originalIsStillNull && + // (iii) This check here is code introduced on 2024.04.27; essentially + // it is part of our approach to try to escape the deadlock of issue + // https://github.com/eclipse-ee4j/eclipselink/issues/2094 + // what we are doing here is checking whether we are convinced to be stuck + // in a deadlock. If we are, we probably should be releasing all of our cache keys, + // flagging them all as invalid cache keys and simply interrupting the merge process completely, + // but since we do not have the experience to do this sort of change + // we just do something soft: we return out of the method, + // as the parent caller in MergeManager.mergeChangesOfWorkingCopyIntoOriginal + // seems to be robust enough to handle our early exit with all its if-else logic + !weBelieveToBeStuckInADeadlock) { + + // (i) instead of doing an eternal wait we do a timed wait + cacheKey.wait(ConcurrencyUtil.SINGLETON.getAcquireWaitTime()); + + // (ii) each time we wake up, check if we might be in a deadlock. + // Run a method that will fire up an exception if we have been sleeping for too long: + // if the persistence.xml property eclipselink.concurrency.manager.allow.concurrency.exception is set to true + // then the method we are calling here will fire up an exception; + // if not it will swallow exceptions, but we will still know via the return value that we are stuck in a + // deadlock + weBelieveToBeStuckInADeadlock = ConcurrencyUtil.SINGLETON + .determineIfReleaseDeferredLockAppearsToBeDeadLocked(cacheKey, whileStartTimeMillis, + lockManager, readLockManager, ALLOW_INTERRUPTED_EXCEPTION_TO_BE_FIRED_UP_TRUE); + + // (iii) Update all loop control variables + someOtherThreadCurrentlyOwningTheCacheKey = cacheKey + .isAcquiredForWritingAndOwneddByADifferentThread(); + original = cacheKey.getObject(); + originalIsStillNull = original == null; + + } // close the parameterized while block.
+ + } catch (InterruptedException e) { + // here we do not follow the original approach of simply returning out of the method; + // the code above this method, in the merge manager executing mergeChangesOfWorkingCopyIntoOriginal, + // seems to cater for a million different scenarios where the original cacheKey.getObject() returns null, + // so just returning here should actually work more or less ok. + // But we do not trust that the merge process will go alright at this point: + // the safest course of action is to state that the cache key is invalid. + // This means the cache should self-heal by needing to recompute this object, + // a small price to pay compared to getting a corrupted cache + cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID); + return cacheKey; + } finally { + if (weBelieveToBeStuckInADeadlock) { + cacheKey.setInvalidationState(CacheKey.CACHE_KEY_INVALID); + } + clearJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired(); + } + + } // close synchronized block + return cacheKey; + + } + + + + + /** + * INTERNAL: + * For use within the merge process this method will get an object from the shared + * cache using a readlock. If a readlock is unavailable then the merge manager will be + * transitioned to deferred locks and a deferred lock will be used. + * @deprecated see bug https://github.com/eclipse-ee4j/eclipselink/issues/2094 + * + * */ - protected CacheKey getCacheKeyFromTargetSessionForMerge(Object implementation, ObjectBuilder builder, ClassDescriptor descriptor, MergeManager mergeManager){ + @Deprecated + protected CacheKey getCacheKeyFromTargetSessionForMergeDeprecated(Object implementation, ObjectBuilder builder, ClassDescriptor descriptor, MergeManager mergeManager){ Object original = null; Object primaryKey = builder.extractPrimaryKeyFromObject(implementation, this, true); if (primaryKey == null) { @@ -5477,4 +5834,16 @@ public void setTolerateInvalidJPQL(boolean b) { public boolean shouldTolerateInvalidJPQL() { return this.tolerateInvalidJPQL; } + + public static Map getThreadsInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired() { + return new HashMap<>(THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED); + } + + public static void clearJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired() { + THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED.remove(Thread.currentThread()); + } + + public static void setJustificationWhyThreadInvolvedWithMergeManagerWaitingForDeferredCacheKeysToNoLongerBeAcquired(String justification) { + THREADS_INVOLVED_WITH_MERGE_MANAGER_WAITING_FOR_DEFERRED_CACHE_KEYS_TO_NO_LONGER_BE_ACQUIRED.put(Thread.currentThread(), justification); + } } diff --git a/jpa/org.eclipse.persistence.jpa/src/org/eclipse/persistence/internal/jpa/EntityManagerSetupImpl.java b/jpa/org.eclipse.persistence.jpa/src/org/eclipse/persistence/internal/jpa/EntityManagerSetupImpl.java index 2ce6d9dc697..754f7faca37 100644 --- a/jpa/org.eclipse.persistence.jpa/src/org/eclipse/persistence/internal/jpa/EntityManagerSetupImpl.java +++ b/jpa/org.eclipse.persistence.jpa/src/org/eclipse/persistence/internal/jpa/EntityManagerSetupImpl.java @@ -2888,6 +2888,7 @@ protected void updateSession(Map m, ClassLoader loader) { updateConcurrencyManagerMaxAllowedFrequencyToProduceMassiveDumpLogMessage(m); updateConcurrencyManagerAllowInterruptedExceptionFired(m);
updateConcurrencyManagerAllowConcurrencyExceptionToBeFiredUp(m); + updateAbstractSessionModeOfOperationOfMergeManagerGetCacheKey(m); updateConcurrencyManagerAllowTakingStackTraceDuringReadLockAcquisition(m); updateConcurrencyManagerUseObjectBuildingSemaphore(m); updateConcurrencyManagerUseWriteLockManagerSemaphore(m); @@ -3836,6 +3837,23 @@ private void updateConcurrencyManagerAllowConcurrencyExceptionToBeFiredUp(Map pe } } + + /** + * Relates to https://github.com/eclipse-ee4j/eclipselink/issues/2094 + * and is telling us what sort of behavior we want to have when executing the + * {@code org.eclipse.persistence.internal.sessions.AbstractSession.getCacheKeyFromTargetSessionForMerge(Object, ObjectBuilder, ClassDescriptor, MergeManager)} + */ + private void updateAbstractSessionModeOfOperationOfMergeManagerGetCacheKey(Map persistenceProperties) { + String abstractSessionModeOfOperationOfMergeManagerGetCacheKey = EntityManagerFactoryProvider + .getConfigPropertyAsStringLogDebug( + PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_ABSTRACT_SESSION_GET_CACHE_KEY_FOR_MERGE_MANAGER_MODE_OF_OPERATION, + persistenceProperties, session); + if (abstractSessionModeOfOperationOfMergeManagerGetCacheKey != null) { + ConcurrencyUtil.SINGLETON.setAbstractSessionModeOfOperationOfMergeManagerGetCacheKey( + abstractSessionModeOfOperationOfMergeManagerGetCacheKey.trim()); + } + } + private void updateConcurrencyManagerAllowTakingStackTraceDuringReadLockAcquisition(Map persistenceProperties) { String allowTakingStackTraceDuringReadLockAcquisition = EntityManagerFactoryProvider.getConfigPropertyAsStringLogDebug(PersistenceUnitProperties.CONCURRENCY_MANAGER_ALLOW_STACK_TRACE_READ_LOCK, persistenceProperties, session); try {