Skip to content

Commit

Permalink
1545 First pull request
Browse files Browse the repository at this point in the history
  • Loading branch information
kladkogex committed Nov 25, 2024
1 parent 09d589b commit 232ec28
Show file tree
Hide file tree
Showing 4 changed files with 40 additions and 33 deletions.
36 changes: 18 additions & 18 deletions libdevcore/LevelDB.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ namespace dev::db {

unsigned c_maxOpenLeveldbFiles = 25;

const size_t LevelDB::BATCH_CHUNK_SIZE = 10000;

namespace {
inline leveldb::Slice toLDBSlice( Slice _slice ) {
return leveldb::Slice( _slice.data(), _slice.size() );
Expand Down Expand Up @@ -328,8 +330,20 @@ void LevelDB::forEachWithPrefix( std::string& _prefix, std::function< bool( Slic
}
}

void LevelDB::createBlockSnap( uint64_t _blockNumber ) {
    // Register a read-only snap for the given block number.
    // Hold the shared guard so the underlying db handle cannot be
    // reopened while the snap is being created.
    SharedDBGuard dbGuard( *this );
    m_snapManager.addSnapForBlock( _blockNumber, m_db, m_dbReopenId );
}

std::shared_ptr< LevelDBSnap > LevelDB::getLastBlockSnap() const {
    // Return the snap taken for the most recent block.
    // The shared guard keeps the db from being reopened while we read it.
    SharedDBGuard dbGuard( *this );
    auto lastSnap = m_snapManager.getLastBlockSnap();
    // the snap must exist by the time this accessor is called
    LDB_CHECK( lastSnap );
    return lastSnap;
}

h256 LevelDB::hashBase() const {
SharedDBGuard lock( *this ); // protect so db is not reopened during iteration
SharedDBGuard lock( *this );
std::unique_ptr< leveldb::Iterator > it( m_db->NewIterator( m_readOptions ) );
if ( it == nullptr ) {
BOOST_THROW_EXCEPTION( DatabaseError() << errinfo_comment( "null iterator" ) );
Expand Down Expand Up @@ -357,7 +371,7 @@ h256 LevelDB::hashBase() const {
}

h256 LevelDB::hashBaseWithPrefix( char _prefix ) const {
SharedDBGuard lock( *this ); // protect so db is not reopened during iteration
SharedDBGuard lock( *this );
std::unique_ptr< leveldb::Iterator > it( m_db->NewIterator( m_readOptions ) );
if ( it == nullptr ) {
BOOST_THROW_EXCEPTION( DatabaseError() << errinfo_comment( "null iterator" ) );
Expand All @@ -381,7 +395,7 @@ h256 LevelDB::hashBaseWithPrefix( char _prefix ) const {
}

bool LevelDB::hashBasePartially( secp256k1_sha256_t* ctx, std::string& lastHashedKey ) const {
SharedDBGuard lock( *this ); // protect so db is not reopened during iteration
SharedDBGuard lock( *this );
std::unique_ptr< leveldb::Iterator > it( m_db->NewIterator( m_readOptions ) );
if ( it == nullptr ) {
BOOST_THROW_EXCEPTION( DatabaseError() << errinfo_comment( "null iterator" ) );
Expand Down Expand Up @@ -415,29 +429,15 @@ bool LevelDB::hashBasePartially( secp256k1_sha256_t* ctx, std::string& lastHashe
}

void LevelDB::doCompaction() const {
    // Compact the whole database.
    // Fix: the pasted diff left two identical `SharedDBGuard lock( *this );`
    // declarations in this body (the removed and the added line of the hunk),
    // which is a redefinition error — keep exactly one.
    SharedDBGuard lock( *this );  // protect so db is not reopened during compaction
    // nullptr begin/end keys ask LevelDB to compact the entire key range
    m_db->CompactRange( nullptr, nullptr );
}


void LevelDB::createBlockSnap( uint64_t _blockNumber ) {
SharedDBGuard lock( *this ); // protect so db is not reopened during snap creation
m_snapManager.addSnapForBlock( _blockNumber, m_db, m_dbReopenId );
}

std::shared_ptr< LevelDBSnap > LevelDB::getLastBlockSnap() const {
SharedDBGuard lock( *this ); // protect so db is not reopened when while we get snap
auto snap = m_snapManager.getLastBlockSnap();
LDB_CHECK( snap );
return snap;
}

std::atomic< uint64_t > LevelDB::g_keysToBeDeletedStats = 0;
std::atomic< uint64_t > LevelDB::g_keyDeletesStats = 0;

uint64_t LevelDB::getKeyDeletesStats() {
    // Total number of key deletes since skaled start.
    // Explicit atomic load — equivalent to the implicit conversion,
    // both use sequentially-consistent ordering by default.
    return g_keyDeletesStats.load();
}


} // namespace dev::db
25 changes: 13 additions & 12 deletions libdevcore/LevelDB.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@ class LevelDB : public DatabaseFace {

std::string lookup( Slice _key ) const override;
bool exists( Slice _key ) const override;

std::string lookup( Slice _key, const std::shared_ptr< LevelDBSnap >& _snap ) const;
bool exists( Slice _key, const std::shared_ptr< LevelDBSnap >& _snap ) const;


void insert( Slice _key, Slice _value ) override;
void kill( Slice _key ) override;

Expand All @@ -74,6 +69,15 @@ class LevelDB : public DatabaseFace {
void forEachWithPrefix(
std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const override;

// create a read-only snap after block processing
void createBlockSnap( uint64_t _blockNumber );

// get the block snap for the last block
std::shared_ptr< LevelDBSnap > getLastBlockSnap() const;

// perform operations with respect to a particular read only snap
std::string lookup( Slice _key, const std::shared_ptr< LevelDBSnap >& _snap ) const;
bool exists( Slice _key, const std::shared_ptr< LevelDBSnap >& _snap ) const;
void forEachWithPrefix( std::string& _prefix, std::function< bool( Slice, Slice ) > f,
const std::shared_ptr< LevelDBSnap >& _snap ) const;

Expand All @@ -84,19 +88,19 @@ class LevelDB : public DatabaseFace {

void doCompaction() const;

void createBlockSnap( uint64_t _blockNumber );

// Return the total count of key deletes since the start
static uint64_t getKeyDeletesStats();
// count of the keys that were deleted since the start of skaled
static std::atomic< uint64_t > g_keyDeletesStats;
// count of the keys that are scheduled to be deleted but are not yet deleted
static std::atomic< uint64_t > g_keysToBeDeletedStats;
static uint64_t getCurrentTimeMs();
std::shared_ptr< LevelDBSnap > getLastBlockSnap() const;

private:
std::unique_ptr< leveldb::DB > m_db;

// stores and manages snap objects
LevelDBSnapManager m_snapManager;
// this is incremented each time this LevelDB instance is reopened
// we reopen states LevelDB every day on archive nodes to avoid
// meta file getting too large
Expand All @@ -112,11 +116,8 @@ class LevelDB : public DatabaseFace {
uint64_t m_lastDBOpenTimeMs;
mutable std::shared_mutex m_dbMutex;

LevelDBSnapManager m_snapManager;


static constexpr size_t BATCH_CHUNK_SIZE = 10000;

static const size_t BATCH_CHUNK_SIZE;

class SharedDBGuard {
const LevelDB& m_levedlDB;
Expand Down
1 change: 1 addition & 0 deletions libethcore/TransactionBase.h
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,7 @@ class TransactionBase {
///< refunded once the contract is ended.
bytes m_data; ///< The data associated with the transaction, or the initialiser if it's a
///< creation transaction.
// use a shared pointer here to speed up copying of transaction objects and to save memory
std::shared_ptr< bytes > m_rawData =
std::make_shared< bytes >(); ///< Raw data, not owned by this object.>
std::vector< bytes > m_accessList; ///< The access list. see more
Expand Down
11 changes: 8 additions & 3 deletions libethereum/Block.h
Original file line number Diff line number Diff line change
Expand Up @@ -112,8 +112,6 @@ class Block {
/// Copy state object.
Block& operator=( Block const& _s );

Block getReadOnlyCopy() const;

/// Get the author address for any transactions we do and rewards we get.
Address author() const { return m_author; }

Expand Down Expand Up @@ -218,10 +216,15 @@ class Block {

/// Execute a given transaction.
/// This will append @a _t to the transaction list and change the state accordingly.
/// If transaction is part of the block we pass transaction index in the block
ExecutionResult execute( LastBlockHashesFace const& _lh, Transaction const& _t,
skale::Permanence _p = skale::Permanence::Committed, OnOpFunc const& _onOp = OnOpFunc(),
int64_t _transactionIndex = -1 );

// this returns a read-only copy of the block that uses
// a snap-based state object
Block getReadOnlyCopy() const;

#ifdef HISTORIC_STATE
ExecutionResult executeHistoricCall( LastBlockHashesFace const& _lh, Transaction const& _t,
std::shared_ptr< AlethStandardTrace > _tracer, uint64_t _transactionIndex );
Expand All @@ -234,7 +237,9 @@ class Block {
/// and bool, true iff there are more transactions to be processed.
std::pair< TransactionReceipts, bool > sync( BlockChain const& _bc, TransactionQueue& _tq,
GasPricer const& _gp, unsigned _msTimeout = 100 );
static void doPartialCatchupTestIfRequested( unsigned _i );

// this will crash skaled after execution of a particular transaction (used for catchup testing)
static void doPartialCatchupTestIfRequested( unsigned _transactionIndexWhereToCrash );

/// Sync our state with the block chain.
/// This basically involves wiping ourselves if we've been superceded and rebuilding from the
Expand Down

0 comments on commit 232ec28

Please sign in to comment.