Remove max retries (#241)
* feat: sign and store router address

Signed-off-by: failfmi <[email protected]>

* fix: transfer data check

Signed-off-by: failfmi <[email protected]>

* fix: check if fee exists

Signed-off-by: failfmi <[email protected]>

* e2e: add router address to expected transfer

Signed-off-by: failfmi <[email protected]>

* Draft documentation

Signed-off-by: radtonev <[email protected]>

* Draft documentation

Signed-off-by: radtonev <[email protected]>

* Draft documentation

Signed-off-by: radtonev <[email protected]>

* Draft documentation

Signed-off-by: radtonev <[email protected]>

* remove max retries

Signed-off-by: radtonev <[email protected]>

* restore config

Signed-off-by: radtonev <[email protected]>

* remove unnecessary func

Signed-off-by: radtonev <[email protected]>

* refactor: remove max retries

Signed-off-by: failfmi <[email protected]>

Co-authored-by: failfmi <[email protected]>
radtonev and failfmi authored May 21, 2021
1 parent 4ad87d5 commit 18c130b
Showing 9 changed files with 6 additions and 37 deletions.
1 change: 1 addition & 0 deletions app/process/watcher/ethereum/watcher.go
```diff
@@ -64,6 +64,7 @@ func (ew *Watcher) listenForEvents(q *pair.Queue) {
 	select {
 	case err := <-sub.Err():
 		ew.logger.Errorf("Burn Event Logs subscription failed. Error: [%s].", err)
+		go ew.listenForEvents(q)
 		return
 	case eventLog := <-events:
 		go ew.handleLog(eventLog, q)
```
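The single added line is the whole fix for this watcher: when the Burn-event subscription errors out, a fresh listener goroutine is spawned and the current one exits, replacing the old bounded-retry budget with an unbounded restart. A minimal, self-contained sketch of that pattern follows; `fakeSubscription` and the string event channel are illustrative stand-ins, not the repository's actual types, and a real listener would re-establish the subscription rather than reuse it. The same one-line change appears again in `app/services/contracts/service.go` below.

```go
package main

import (
	"errors"
	"log"
	"time"
)

// fakeSubscription stands in for the event subscription the real watcher
// holds; only the Err channel that the select reads from is modelled.
type fakeSubscription struct{ errs chan error }

func (s *fakeSubscription) Err() <-chan error { return s.errs }

// listenForEvents mimics the patched control flow: when the subscription
// errors, spawn a fresh listener goroutine and let the current one exit,
// rather than decrementing a retry budget. A real implementation would
// re-establish the subscription at this point instead of reusing it.
func listenForEvents(sub *fakeSubscription, events <-chan string) {
	for {
		select {
		case err := <-sub.Err():
			log.Printf("subscription failed: %v; restarting listener", err)
			go listenForEvents(sub, events)
			return
		case ev := <-events:
			log.Printf("handling event: %s", ev)
		}
	}
}

func main() {
	sub := &fakeSubscription{errs: make(chan error, 1)}
	events := make(chan string, 1)
	events <- "Burn"
	sub.errs <- errors.New("connection dropped")
	go listenForEvents(sub, events)
	time.Sleep(100 * time.Millisecond) // give the listeners time to log
}
```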
17 changes: 2 additions & 15 deletions app/process/watcher/message/watcher.go
```diff
@@ -37,12 +37,11 @@ type Watcher struct {
 	topicID          hedera.TopicID
 	statusRepository repository.Status
 	pollingInterval  time.Duration
-	maxRetries       int
 	startTimestamp   int64
 	logger           *log.Entry
 }
 
-func NewWatcher(client client.MirrorNode, topicID string, repository repository.Status, pollingInterval time.Duration, maxRetries int, startTimestamp int64) *Watcher {
+func NewWatcher(client client.MirrorNode, topicID string, repository repository.Status, pollingInterval time.Duration, startTimestamp int64) *Watcher {
 	id, err := hedera.TopicIDFromString(topicID)
 	if err != nil {
 		log.Fatalf("Could not start Consensus Topic Watcher for topic [%s] - Error: [%s]", topicID, err)
@@ -54,7 +53,6 @@ func NewWatcher(client client.MirrorNode, topicID string, repository repository.
 		statusRepository: repository,
 		startTimestamp:   startTimestamp,
 		pollingInterval:  pollingInterval,
-		maxRetries:       maxRetries,
 		logger:           config.GetLoggerFor(fmt.Sprintf("[%s] Topic Watcher", topicID)),
 	}
 }
@@ -103,7 +101,7 @@ func (cmw Watcher) beginWatching(q *pair.Queue) {
 	messages, err := cmw.client.GetMessagesAfterTimestamp(cmw.topicID, milestoneTimestamp)
 	if err != nil {
 		cmw.logger.Errorf("Error while retrieving messages from mirror node. Error [%s]", err)
-		cmw.restart(q)
+		go cmw.beginWatching(q)
 		return
 	}
 
@@ -133,14 +131,3 @@ func (cmw Watcher) processMessage(topicMsg mirror_node.Message, q *pair.Queue) {
 
 	q.Push(&pair.Message{Payload: msg})
 }
-
-func (cmw *Watcher) restart(q *pair.Queue) {
-	if cmw.maxRetries > 0 {
-		cmw.maxRetries--
-		cmw.logger.Infof("Watcher is trying to reconnect. Connections left [%d]", cmw.maxRetries)
-		time.Sleep(5 * time.Second)
-		go cmw.beginWatching(q)
-		return
-	}
-	cmw.logger.Errorf("Watcher failed: [Too many retries]")
-}
```
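With the `restart` helper gone, a failed poll now re-enters `beginWatching` directly, so the watcher retries indefinitely instead of giving up after `maxRetries` attempts; the helper's five-second pause between attempts disappears with it. Below is a runnable sketch of the new behavior, with `fetch` as an illustrative stand-in for the mirror-node client call, not the real `GetMessagesAfterTimestamp` signature:

```go
package main

import (
	"errors"
	"log"
)

// beginWatching mirrors the new error path: a failed poll logs the error,
// re-enters the watch loop in a fresh goroutine, and returns; it retries
// forever, with no countdown and (unlike the deleted helper) no 5s pause.
// fetch stands in for the mirror-node client call; done exists only so
// the demo can exit.
func beginWatching(fetch func() ([]string, error), done chan<- struct{}) {
	messages, err := fetch()
	if err != nil {
		log.Printf("error while retrieving messages: %v; restarting watcher", err)
		go beginWatching(fetch, done)
		return
	}
	for _, m := range messages {
		log.Printf("processing message: %s", m)
	}
	close(done)
}

func main() {
	calls := 0
	fetch := func() ([]string, error) {
		calls++
		if calls < 3 { // fail twice, then recover
			return nil, errors.New("mirror node unreachable")
		}
		return []string{"msg-1", "msg-2"}, nil
	}
	done := make(chan struct{})
	go beginWatching(fetch, done)
	<-done
}
```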
16 changes: 1 addition & 15 deletions app/process/watcher/transfer/watcher.go
```diff
@@ -39,7 +39,6 @@ type Watcher struct {
 	accountID        hedera.AccountID
 	pollingInterval  time.Duration
 	statusRepository repository.Status
-	maxRetries       int
 	startTimestamp   int64
 	logger           *log.Entry
 	contractService  service.Contracts
@@ -51,7 +50,6 @@ func NewWatcher(
 	accountID string,
 	pollingInterval time.Duration,
 	repository repository.Status,
-	maxRetries int,
 	startTimestamp int64,
 	contractService service.Contracts,
 ) *Watcher {
@@ -66,7 +64,6 @@ func NewWatcher(
 		accountID:        id,
 		pollingInterval:  pollingInterval,
 		statusRepository: repository,
-		maxRetries:       maxRetries,
 		startTimestamp:   startTimestamp,
 		logger:           config.GetLoggerFor(fmt.Sprintf("[%s] Transfer Watcher", accountID)),
 		contractService:  contractService,
@@ -117,7 +114,7 @@ func (ctw Watcher) beginWatching(q *pair.Queue) {
 	transactions, e := ctw.client.GetAccountCreditTransactionsAfterTimestamp(ctw.accountID, milestoneTimestamp)
 	if e != nil {
 		ctw.logger.Errorf("Suddenly stopped monitoring account - [%s]", e)
-		ctw.restart(q)
+		go ctw.beginWatching(q)
 		return
 	}
 
@@ -162,14 +159,3 @@ func (ctw Watcher) processTransaction(tx mirror_node.Transaction, q *pair.Queue) {
 	transferMessage := transfer.New(tx.TransactionID, ethAddress, nativeAsset, wrappedAsset, amount, ctw.contractService.Address().String())
 	q.Push(&pair.Message{Payload: transferMessage})
 }
-
-func (ctw *Watcher) restart(q *pair.Queue) {
-	if ctw.maxRetries > 0 {
-		ctw.maxRetries--
-		ctw.logger.Infof("Watcher is trying to reconnect")
-		time.Sleep(5 * time.Second)
-		go ctw.beginWatching(q)
-		return
-	}
-	ctw.logger.Errorf("Watcher failed: [Too many retries]")
-}
```
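The transfer watcher receives the identical treatment as the message watcher above: the bounded `restart` helper is deleted and the failing poll path re-enters `beginWatching` in a new goroutine, so the earlier sketch applies here unchanged.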
1 change: 1 addition & 0 deletions app/services/contracts/service.go
```diff
@@ -129,6 +129,7 @@ func (bsc *Service) listenForMemberUpdatedEvent() {
 	select {
 	case err := <-sub.Err():
 		bsc.logger.Errorf("MemberUpdated Event Logs subscription failed. Error [%s].", err)
+		go bsc.listenForMemberUpdatedEvent()
 		return
 	case <-events:
 		bsc.updateMembers()
```
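As with the Ethereum watcher, the subscription's error case now respawns the listener instead of terminating it, following the resubscribe pattern sketched earlier.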
2 changes: 0 additions & 2 deletions cmd/main.go
```diff
@@ -152,7 +152,6 @@ func addTransferWatcher(configuration *config.Config,
 		account,
 		configuration.Validator.Clients.MirrorNode.PollingInterval,
 		*repository,
-		configuration.Validator.Clients.MirrorNode.MaxRetries,
 		startTimestamp,
 		contractService)
 }
@@ -168,6 +167,5 @@ func addConsensusTopicWatcher(configuration *config.Config,
 		topic,
 		repository,
 		configuration.Validator.Clients.MirrorNode.PollingInterval,
-		configuration.Validator.Clients.MirrorNode.MaxRetries,
 		startTimestamp)
 }
```
3 changes: 1 addition & 2 deletions config/application.yml
```diff
@@ -25,10 +25,9 @@ validator:
     mirror_node:
       api_address: https://testnet.mirrornode.hedera.com/api/v1/
       client_address: hcs.testnet.mirrornode.hedera.com:5600
-      max_retries: 10
       polling_interval: 5
   log_level: info
   port: 5200
   recovery:
     start_timestamp:
-rest-api-only: false
\ No newline at end of file
+rest-api-only: false
```
1 change: 0 additions & 1 deletion config/config.go
```diff
@@ -112,7 +112,6 @@ type MirrorNode struct {
 	ClientAddress   string        `yaml:"client_address" env:"VALIDATOR_CLIENTS_MIRROR_NODE_CLIENT_ADDRESS"`
 	ApiAddress      string        `yaml:"api_address" env:"VALIDATOR_CLIENTS_MIRROR_NODE_API_ADDRESS"`
 	PollingInterval time.Duration `yaml:"polling_interval" env:"VALIDATOR_CLIENTS_MIRROR_NODE_POLLING_INTERVAL"`
-	MaxRetries      int           `yaml:"max_retries" env:"VALIDATOR_CLIENTS_MIRROR_NODE_TOPIC_ID"`
 }
 
 type Database struct {
```
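For reference, here is a minimal sketch of how the trimmed `mirror_node` section of `application.yml` could map onto this struct. Using `gopkg.in/yaml.v2` is an assumption made for illustration; the repository's actual config loader, and whatever honors the `env` tags, may differ.

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

// MirrorNode matches the trimmed struct from config/config.go, with the
// env tags omitted since plain yaml.v2 ignores them anyway.
type MirrorNode struct {
	ClientAddress   string        `yaml:"client_address"`
	ApiAddress      string        `yaml:"api_address"`
	PollingInterval time.Duration `yaml:"polling_interval"`
}

func main() {
	raw := []byte(`
client_address: hcs.testnet.mirrornode.hedera.com:5600
api_address: https://testnet.mirrornode.hedera.com/api/v1/
polling_interval: 5
`)
	var mn MirrorNode
	if err := yaml.Unmarshal(raw, &mn); err != nil {
		panic(err)
	}
	// Note: a bare integer lands in time.Duration as nanoseconds; the docs
	// say the value is interpreted as seconds, so the application
	// presumably scales it somewhere not shown in this diff.
	fmt.Printf("%+v\n", mn)
}
```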
1 change: 0 additions & 1 deletion docs/configuration.md
```diff
@@ -37,7 +37,6 @@ Name | Default
 `validator.clients.hedera.topic_id` | "" | The topic id that the validators use to monitor for incoming hedera consensus messages.
 `validator.clients.mirror_node.api_address` | https://testnet.mirrornode.hedera.com/api/v1/ | The Hedera Rest API root endpoint. Depending on the Hedera network type, this will need to be changed.
 `validator.clients.mirror_node.client_address` | hcs.testnet.mirrornode.hedera.com:5600 | The HCS Mirror node endpoint. Depending on the Hedera network type, this will need to be changed.
-`validator.clients.mirror_node.max_retries` | 10 | The maximum number of retries that the mirror node has to continue monitoring after a failure, before stopping completely.
 `validator.clients.mirror_node.polling_interval` | 5 | How often (in seconds) the application will poll the mirror node for new transactions.
 `validator.log_level` | info | The log level of the validator. Possible values: `info`, `debug`, `trace` case insensitive.
 `validator.port` | 5200 | The port on which the application runs.
```
1 change: 0 additions & 1 deletion docs/testing.md
```diff
@@ -22,7 +22,6 @@ Name | Description
 `hedera.members` | The Hedera account ids of the validators, to which their bridge fees will be sent (if Bridge accepts Hedera Tokens, associations with these tokens will be required). Used to assert balances after transactions.
 `hedera.mirror_node.api_address` | The Hedera Rest API root endpoint. Depending on the Hedera network type, this will need to be changed.
 `hedera.mirror_node.client_address` | The HCS Mirror node endpoint. Depending on the Hedera network type, this will need to be changed.
-`hedera.mirror_node.max_retries` | The maximum number of retries that the mirror node has to continue monitoring after a failure, before stopping completely.
 `hedera.mirror_node.polling_interval` | How often (in seconds) the application will poll the mirror node for new transactions.
 `hedera.network_type` | Which Hedera network to use. Can be either `mainnet`, `previewnet`, `testnet`.
 `hedera.sender.account` | The account that will be sending assets through the bridge.
```
