Merge pull request #5996 from GafferHQ/historyRegressionFix
SceneAlgo : Fix history() performance regression
murraystevenson authored Aug 13, 2024
2 parents c9012f1 + 0fa9c08 commit 993ab44
Showing 4 changed files with 93 additions and 35 deletions.
Changes.md: 2 additions & 0 deletions
@@ -14,6 +14,7 @@ Improvements
- Blue : Proxy
- Red : Guide
- Catalogue : Added a handle for controlling the relative sizes of the listing and image property widgets.
- RenderPassEditor, LightEditor : Improved update performance for certain graph configurations, by optimising `SceneAlgo::history()` (#5199).

Fixes
-----
@@ -26,6 +27,7 @@ Fixes
- Fixed bug which allowed locked Catalogues to be edited.
- Fixed NodeEditor update when the first image is added or the last image is removed.
- NameWidget : Fixed bug which allowed plugs on locked nodes to be renamed.
- ValuePlug : Fixed the plug passed to `Monitor::forceMonitoring()`. Previously `Process::destinationPlug()` was being passed instead of `Process::plug()`.

API
---
python/GafferSceneTest/SceneAlgoTest.py: 40 additions & 0 deletions
@@ -1946,6 +1946,46 @@ def assertNoCanceller( history ) :

assertNoCanceller( history )

@GafferTest.TestRunner.PerformanceTestMethod()
def testHistoryDiamondPerformance( self ) :

# A series of diamonds where every iteration reads the output of the
# previous iteration twice and then feeds into the next iteration.
#
# o
# / \
# o o
# \ /
# o
# / \
# o o
# \ /
# o
# / \
# ...
# \ /
# o
#
# Without caching, this leads to an explosion of paths through the
# graph, and can lead to poor performance in `history()`.

plane = GafferScene.Plane()

loop = Gaffer.Loop()
loop.setup( plane["out"] )
loop["in"].setInput( plane["out"] )

copyOptions = GafferScene.CopyOptions()
copyOptions["in"].setInput( loop["previous"] )
copyOptions["source"].setInput( loop["previous"] )
copyOptions["options"].setValue( "*" )

loop["next"].setInput( copyOptions["out"] )
loop["iterations"].setValue( 20 )

with GafferTest.TestRunner.PerformanceScope() :
GafferScene.SceneAlgo.history( loop["out"]["globals"] )

def testLinkingQueries( self ) :

# Everything linked to `defaultLights` via the default value for the attribute.
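As a back-of-the-envelope illustration of the comment in the test above (an editorial sketch, not part of the commit): every diamond doubles the number of distinct paths from the output back to the source, so the 20 iterations above produce 2**20 paths through a graph of only ~60 nodes. Memoising visited nodes keeps the walk linear, which is the same sharing the fix introduces into `history()`.

```python
# Illustrative only : count the distinct paths through a chain of stacked
# diamonds using a memoised depth-first walk. Without `cache`, the walk
# would make 2**20 visits, which is effectively what an unmemoised
# `history()` traversal was doing.

def countPaths( graph, start, end ) :

	cache = {}

	def visit( node ) :

		if node == end :
			return 1
		if node not in cache :
			cache[node] = sum( visit( output ) for output in graph[node] )
		return cache[node]

	return visit( start )

# Build 20 stacked diamonds : previous -> left/right -> bottom -> ...
graph = {}
previous = "source"
for i in range( 20 ) :
	left, right, bottom = f"l{i}", f"r{i}", f"b{i}"
	graph[previous] = [ left, right ]
	graph[left] = [ bottom ]
	graph[right] = [ bottom ]
	previous = bottom

print( countPaths( graph, "source", previous ) )  # 1048576 == 2**20
```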
src/Gaffer/ValuePlug.cpp: 1 addition & 1 deletion
@@ -241,7 +241,7 @@ class ValuePlug::HashProcess : public Process
// Then get our hash. We do this using this `acquireHash()` functor so that
// we can repeat the process for `Checked` mode.

const bool forceMonitoring = Process::forceMonitoring( threadState, plug, staticType );
const bool forceMonitoring = Process::forceMonitoring( threadState, p, staticType );

auto acquireHash = [&]( const HashCacheKey &cacheKey ) {

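The distinction behind this one-line fix: `Process::destinationPlug()` is the plug whose value was originally requested, while `Process::plug()` is the upstream plug that actually performs the computation, and `forceMonitoring()` should be keyed on the latter. The same source-versus-destination relationship can be seen from Python through ordinary plug connections (an illustrative sketch of the concept, not the internal `Process` API):

```python
import Gaffer

node = Gaffer.Node()
node["a"] = Gaffer.IntPlug()
node["b"] = Gaffer.IntPlug()
node["c"] = Gaffer.IntPlug()

# c <- b <- a : a request for the value of `c` is forwarded up the chain,
# so `c` is the destination, but `a` is the plug that provides the value.
node["b"].setInput( node["a"] )
node["c"].setInput( node["b"] )

assert node["c"].source().isSame( node["a"] )
```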
src/GafferScene/SceneAlgo.cpp: 50 additions & 34 deletions
@@ -437,8 +437,6 @@ class CapturingMonitor : public Monitor
{
const Plug *p = process->plug();

CapturedProcess::Ptr capturedProcess;
ProcessOrScope entry;
if( !shouldCapture( p ) )
{
// Parents may spawn other processes in support of the requested plug. This is one
@@ -450,39 +448,44 @@ class CapturingMonitor : public Monitor
// order of the stack is preserved - if this happens out of order, the stack will be
// corrupted, and weird crashes will happen. But as long as it is created in
// processStarted, and released in processFinished, everything should line up.
entry = std::make_unique<Monitor::Scope>( this, false );
}
else
{
capturedProcess = std::make_unique<CapturedProcess>();
capturedProcess->type = process->type();
capturedProcess->plug = p;
capturedProcess->destinationPlug = process->destinationPlug();
capturedProcess->context = new Context( *process->context(), /* omitCanceller = */ true );
entry = capturedProcess.get();
Mutex::scoped_lock lock( m_mutex );
m_processMap[process] = std::make_unique<Monitor::Scope>( this, false );
return;
}

// Capture this process.

CapturedProcess::Ptr capturedProcess = std::make_unique<CapturedProcess>();
capturedProcess->type = process->type();
capturedProcess->plug = p;
capturedProcess->destinationPlug = process->destinationPlug();
capturedProcess->context = new Context( *process->context(), /* omitCanceller = */ true );

Mutex::scoped_lock lock( m_mutex );
m_processMap[process] = std::move( entry );
m_processMap[process] = capturedProcess.get();

if( capturedProcess )
ProcessMap::const_iterator it = m_processMap.find( process->parent() );
if( it != m_processMap.end() )
{
ProcessMap::const_iterator it = m_processMap.find( process->parent() );
if( it != m_processMap.end() )
CapturedProcess * const * parent = boost::get< CapturedProcess* >( &it->second );
if( parent && *parent )
{
CapturedProcess * const * parent = boost::get< CapturedProcess* >( &it->second );
if( parent && *parent )
{
(*parent)->children.push_back( std::move( capturedProcess ) );
}
}
else
{
// Either `process->parent()` was null, or was started
// before we were made active via `Monitor::Scope`.
m_rootProcesses.push_back( std::move( capturedProcess ) );
(*parent)->children.push_back( std::move( capturedProcess ) );
}
}
else
{
// Either `process->parent()` was null, or was started
// before we were made active via `Monitor::Scope`.
m_rootProcesses.push_back( std::move( capturedProcess ) );
}

// Remember that we've monitored this process so that we don't force
// its monitoring again.

IECore::MurmurHash h = process->context()->hash();
h.append( reinterpret_cast<intptr_t>( p ) );
m_monitoredSet.insert( h );
}

void processFinished( const Process *process ) override
@@ -498,12 +501,22 @@

bool forceMonitoring( const Plug *plug, const IECore::InternedString &processType ) override
{
if( processType == g_hashProcessType && shouldCapture( plug ) )
if( processType != g_hashProcessType || !shouldCapture( plug ) )
{
return true;
return false;
}

return false;
// Don't force the monitoring of a process we've monitored already. This does
// mean we throw away diamond dependencies in the process graph, but it is essential
// for performance in some cases - see `testHistoryDiamondPerformance()` for example.
/// \todo Potentially we could use the hash to find the previously captured process,
/// and instance that into our process graph. This would require clients of `History`
/// to be updated to handle such topologies efficiently by tracking previously visited
/// items. It may also be of fairly low value, since typically our goal is to find the
/// first relevant path through the graph to present to the user.
IECore::MurmurHash h = Context::current()->hash();
h.append( reinterpret_cast<intptr_t>( plug ) );
return !m_monitoredSet.count( h );
}

private :
@@ -518,15 +531,18 @@
}

using Mutex = tbb::spin_mutex;

Mutex m_mutex;

using ProcessOrScope = boost::variant<CapturedProcess *, std::unique_ptr< Monitor::Scope>>;
using ProcessMap = boost::unordered_map<const Process *, ProcessOrScope>;

const IECore::InternedString m_scenePlugChildName;

// Protects `m_processMap` and `m_rootProcesses`.
/// \todo Perhaps they should be concurrent containers instead?
Mutex m_mutex;
ProcessMap m_processMap;
CapturedProcess::PtrVector m_rootProcesses;
IECore::InternedString m_scenePlugChildName;

tbb::concurrent_unordered_set<IECore::MurmurHash> m_monitoredSet;

};

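Taken together, these changes mean `history()` captures each unique (context, plug) process once, at the cost of discarding repeated diamond branches, as the todo above notes. One way to see the effect from Python is to rebuild the test's network and walk the returned `History`, whose `predecessors` list links each captured evaluation to its upstream evaluations (a sketch based on the test above; the network construction is copied from it, the walk is editorial):

```python
import Gaffer
import GafferScene

# The same diamond network as `testHistoryDiamondPerformance()`.
plane = GafferScene.Plane()

loop = Gaffer.Loop()
loop.setup( plane["out"] )
loop["in"].setInput( plane["out"] )

copyOptions = GafferScene.CopyOptions()
copyOptions["in"].setInput( loop["previous"] )
copyOptions["source"].setInput( loop["previous"] )
copyOptions["options"].setValue( "*" )

loop["next"].setInput( copyOptions["out"] )
loop["iterations"].setValue( 20 )

def countHistory( history ) :

	# One entry per captured process. With the deduplication above, this
	# count stays proportional to the number of distinct processes rather
	# than to the 2**20 paths through the diamonds.
	return 1 + sum( countHistory( predecessor ) for predecessor in history.predecessors )

history = GafferScene.SceneAlgo.history( loop["out"]["globals"] )
print( countHistory( history ) )
```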
