From 2561595a6fcacbefeaa32233428cdc2eb92f3a83 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Mon, 18 Nov 2024 16:50:44 +0100 Subject: [PATCH 01/13] add files for computing parts of the accounting --- src/fetch/buffer_accounting.py | 39 +++++++++++++++++ src/fetch/partner_fees.py | 62 +++++++++++++++++++++++++++ src/fetch/protocol_fees.py | 25 +++++++++++ src/fetch/rewards.py | 76 ++++++++++++++++++++++++++++++++++ src/fetch/solver_info.py | 63 ++++++++++++++++++++++++++++ 5 files changed, 265 insertions(+) create mode 100644 src/fetch/buffer_accounting.py create mode 100644 src/fetch/partner_fees.py create mode 100644 src/fetch/protocol_fees.py create mode 100644 src/fetch/rewards.py create mode 100644 src/fetch/solver_info.py diff --git a/src/fetch/buffer_accounting.py b/src/fetch/buffer_accounting.py new file mode 100644 index 00000000..772cb113 --- /dev/null +++ b/src/fetch/buffer_accounting.py @@ -0,0 +1,39 @@ +"""Functionality for buffer accounting.""" + +from pandas import DataFrame + +BATCH_DATA_COLUMNS = ["solver", "network_fee_eth"] +SLIPPAGE_COLUMNS = [ + "solver", + "eth_slippage_wei", +] + +BUFFER_ACCOUNTING_COLUMNS = [ + "solver", + "network_fee_eth", + "slippage_eth", +] + + +def compute_buffer_accounting( + batch_data: DataFrame, slippage_data: DataFrame +) -> DataFrame: + """Compute buffer accounting per solver""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + assert set(SLIPPAGE_COLUMNS).issubset(set(slippage_data.columns)) + + buffer_accounting = batch_data[BATCH_DATA_COLUMNS].merge( + slippage_data[SLIPPAGE_COLUMNS], how="outer", on="solver", validate="one_to_one" + ) + buffer_accounting = buffer_accounting.rename( + columns={"eth_slippage_wei": "slippage_eth"} + ) + + # change all types to object to use native python types + buffer_accounting = buffer_accounting.astype(object) + + assert set(buffer_accounting.columns) == set(BUFFER_ACCOUNTING_COLUMNS) + + return buffer_accounting diff --git a/src/fetch/partner_fees.py b/src/fetch/partner_fees.py new file mode 100644 index 00000000..11954c9d --- /dev/null +++ b/src/fetch/partner_fees.py @@ -0,0 +1,62 @@ +"""Functionality for partner fees.""" + +from collections import defaultdict + +import numpy as np +import pandas as pd +from pandas import DataFrame + +from src.config import ProtocolFeeConfig + +BATCH_DATA_COLUMNS = ["partner_list", "partner_fee_eth"] + +PARTNER_FEES_COLUMNS = ["partner", "partner_fee_eth", "partner_fee_tax"] + + +def compute_partner_fees(batch_data: DataFrame, config: ProtocolFeeConfig) -> DataFrame: + """Compute partner fees per integrator""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + + partner_fee_lists = batch_data[BATCH_DATA_COLUMNS] + + partner_fees = compute_partner_fees_per_partner(partner_fee_lists, config) + + return partner_fees + + +def compute_partner_fees_per_partner( + partner_fee_lists: DataFrame, config: ProtocolFeeConfig +) -> DataFrame: + """Aggregate fees from different solvers""" + + partner_fees_dict: defaultdict[str, int] = defaultdict(int) + for _, row in partner_fee_lists.iterrows(): + if row["partner_list"] is None: + continue + + # We assume the two lists used below, i.e., + # partner_list and partner_fee_eth, + # are "aligned". 
+ + for partner, partner_fee in zip(row["partner_list"], row["partner_fee_eth"]): + partner_fees_dict[partner] += int(partner_fee) + + partner_fees_df = pd.DataFrame( + list(partner_fees_dict.items()), + columns=["partner", "partner_fee_eth"], + ) + + partner_fees_df["partner_fee_tax"] = np.where( + partner_fees_df["partner"] == config.reduced_cut_address, + config.partner_fee_reduced_cut, + config.partner_fee_cut, + ) + + # change all types to object to use native python types + partner_fees_df = partner_fees_df.astype(object) + + assert set(partner_fees_df.columns) == set(PARTNER_FEES_COLUMNS) + + return partner_fees_df diff --git a/src/fetch/protocol_fees.py b/src/fetch/protocol_fees.py new file mode 100644 index 00000000..2c3f3422 --- /dev/null +++ b/src/fetch/protocol_fees.py @@ -0,0 +1,25 @@ +"""Functionality for protocol fees.""" + +from pandas import DataFrame + +BATCH_DATA_COLUMNS = ["solver", "protocol_fee_eth"] + +PROTOCOL_FEES_COLUMNS = BATCH_DATA_COLUMNS + + +def compute_protocol_fees( + batch_data: DataFrame, +) -> DataFrame: + """Compute protocol fees per solver.""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_DATA_COLUMNS).issubset(set(batch_data.columns)) + + protocol_fees = batch_data[BATCH_DATA_COLUMNS].copy() + + # change all types to object to use native python types + protocol_fees = protocol_fees.astype(object) + + assert set(protocol_fees.columns) == set(PROTOCOL_FEES_COLUMNS) + + return protocol_fees diff --git a/src/fetch/rewards.py b/src/fetch/rewards.py new file mode 100644 index 00000000..c17ea8d6 --- /dev/null +++ b/src/fetch/rewards.py @@ -0,0 +1,76 @@ +"""Functionality for rewards.""" + +from fractions import Fraction + +from pandas import DataFrame + +from src.config import RewardConfig + + +BATCH_REWARDS_COLUMNS = ["solver", "primary_reward_eth"] +QUOTE_REWARDS_COLUMNS = ["solver", "num_quotes"] + +REWARDS_COLUMNS = [ + "solver", + "primary_reward_eth", + "primary_reward_cow", + "quote_reward_cow", + "reward_token_address", +] + +NUMERICAL_COLUMNS = [ + "primary_reward_eth", + "primary_reward_cow", + "quote_reward_cow", +] + + +def compute_rewards( + batch_data: DataFrame, + quote_rewards: DataFrame, + exchange_rate: Fraction, + reward_config: RewardConfig, +) -> DataFrame: + """Compute solver rewards""" + + # validate batch rewards and quote rewards columns + assert set(BATCH_REWARDS_COLUMNS).issubset(set(batch_data.columns)) + assert set(QUOTE_REWARDS_COLUMNS).issubset(set(quote_rewards.columns)) + + rewards = ( + batch_data[BATCH_REWARDS_COLUMNS] + .merge( + quote_rewards[QUOTE_REWARDS_COLUMNS], + how="outer", + on="solver", + validate="one_to_one", + ) + .fillna(0) + ) + + rewards["primary_reward_cow"] = rewards["primary_reward_eth"] * float(exchange_rate) + + # Pandas has poor support for large integers, must cast the constant to float here, + # otherwise the dtype would be inferred as int64 (which overflows). 
+ rewards["quote_reward_cow"] = ( + float( + min( + reward_config.quote_reward_cow, + int(reward_config.quote_reward_cap_native * exchange_rate), + ) + ) + * rewards["num_quotes"] + ) + rewards = rewards.drop("num_quotes", axis=1) + + rewards["reward_token_address"] = str(reward_config.reward_token_address) + + # change all types to object to use native python types + rewards = rewards.astype(object) + + # for number_col in NUMERICAL_COLUMNS: + # rewards[number_col] = pandas.to_numeric(rewards[number_col]) + + assert set(rewards.columns) == set(REWARDS_COLUMNS) + + return rewards diff --git a/src/fetch/solver_info.py b/src/fetch/solver_info.py new file mode 100644 index 00000000..96bd7395 --- /dev/null +++ b/src/fetch/solver_info.py @@ -0,0 +1,63 @@ +"""Functionality for solver information.""" + +import numpy as np +from pandas import DataFrame + +from src.config import AccountingConfig +from src.logger import set_log + +log = set_log(__name__) + +REWARD_TARGETS_COLUMNS = ["solver", "reward_target", "pool_address", "solver_name"] +SERVICE_FEES_COLUMNS = ["solver", "service_fee"] + +SOLVER_INFO_COLUMNS = [ + "solver", + "reward_target", + "buffer_accounting_target", + "solver_name", + "service_fee", +] + + +def compute_solver_info( + reward_targets: DataFrame, + service_fees: DataFrame, + config: AccountingConfig, +) -> DataFrame: + """Compute solver information""" + + # validate reward targets and service fees columns + assert set(REWARD_TARGETS_COLUMNS).issubset(set(reward_targets.columns)) + assert set(SERVICE_FEES_COLUMNS).issubset(set(service_fees.columns)) + + solver_info = reward_targets[REWARD_TARGETS_COLUMNS].merge( + service_fees[SERVICE_FEES_COLUMNS], how="outer", on="solver" + ) + + solver_info["buffer_accounting_target"] = np.where( + solver_info["pool_address"] != config.reward_config.cow_bonding_pool.address, + solver_info["solver"], + solver_info["reward_target"], + ) + solver_info = solver_info.drop("pool_address", axis=1) + + solver_info["service_fee"] = [ + ( + service_fees_flag * config.reward_config.service_fee_factor + if service_fees_flag is not None + else 0 + ) + for service_fees_flag in solver_info["service_fee"] + ] + + if not solver_info["solver"].is_unique: + duplicate_solvers = solver_info[solver_info["solver"].duplicated(keep=False)] + log.warning(f"Duplicate solvers: {duplicate_solvers}. 
Choosing first entry.") + solver_info = solver_info.drop_duplicates(subset=["solver"]) + + solver_info = solver_info.astype(object) + + assert set(solver_info.columns) == set(SOLVER_INFO_COLUMNS) + + return solver_info From 69f18d2a41eadc952368d5e4d9c5b626340e7738 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Fri, 15 Nov 2024 17:18:06 +0100 Subject: [PATCH 02/13] call new files in accountingold code is not yet removed# Conflicts:# src/fetch/payouts.py --- src/fetch/payouts.py | 86 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 9 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 852b0115..7e676977 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -7,6 +7,7 @@ from dataclasses import dataclass from datetime import timedelta from fractions import Fraction +from functools import reduce from typing import Callable import numpy as np @@ -17,6 +18,11 @@ from src.config import AccountingConfig from src.fetch.dune import DuneFetcher from src.fetch.prices import exchange_rate_atoms +from src.fetch.solver_info import compute_solver_info +from src.fetch.rewards import compute_rewards +from src.fetch.protocol_fees import compute_protocol_fees +from src.fetch.partner_fees import compute_partner_fees +from src.fetch.buffer_accounting import compute_buffer_accounting from src.logger import log_saver from src.models.accounting_period import AccountingPeriod from src.models.overdraft import Overdraft @@ -442,6 +448,45 @@ def construct_payout_dataframe( return merged_df +def construct_solver_payouts_dataframe( + solver_info: DataFrame, + rewards: DataFrame, + protocol_fees: DataFrame, + buffer_accounting: DataFrame, +) -> DataFrame: + # 1. Validate data + + # 2. Normalize Join Column (and Ethereum Address Field) + join_column = "solver" + normalize_address_field(solver_info, join_column) + normalize_address_field(rewards, join_column) + normalize_address_field(protocol_fees, join_column) + normalize_address_field(buffer_accounting, join_column) + + # 3. Merge data + solver_payouts = reduce( + lambda left, right: left.merge( + right, + how="outer", + on="solver", + validate="one_to_one", + sort=True, + ), + [rewards, protocol_fees, buffer_accounting], + ).merge(solver_info, how="left", on="solver") + + # 4. 
Set default values + solver_payouts["primary_reward_eth"] = solver_payouts["primary_reward_eth"].fillna( + 0 + ) + solver_payouts["slippage_eth"] = solver_payouts["slippage_eth"].fillna(0) + solver_payouts["protocol_fee_eth"] = solver_payouts["protocol_fee_eth"].fillna(0) + solver_payouts["network_fee_eth"] = solver_payouts["network_fee_eth"].fillna(0) + solver_payouts["service_fee"] = solver_payouts["service_fee"].fillna(Fraction(0, 1)) # type: ignore + + return solver_payouts + + def construct_partner_fee_payments( partner_fees_df: DataFrame, config: AccountingConfig ) -> tuple[dict[str, int], int]: @@ -492,16 +537,16 @@ def construct_payouts( # pylint: disable-msg=too-many-locals quote_rewards_df = orderbook.get_quote_rewards(dune.start_block, dune.end_block) - batch_rewards_df = orderbook.get_solver_rewards( + batch_data = orderbook.get_solver_rewards( dune.start_block, dune.end_block, config.reward_config.batch_reward_cap_upper, config.reward_config.batch_reward_cap_lower, ) - partner_fees_df = batch_rewards_df[["partner_list", "partner_fee_eth"]] - batch_rewards_df = batch_rewards_df.drop( - ["partner_list", "partner_fee_eth"], axis=1 - ) + batch_rewards_df = batch_data[ + ["solver", "primary_reward_eth", "protocol_fee_eth", "network_fee_eth"] + ] + partner_fees_df = batch_data[["partner_list", "partner_fee_eth"]] assert batch_rewards_df["solver"].is_unique, "solver not unique in batch rewards" assert quote_rewards_df["solver"].is_unique, "solver not unique in quote rewards" @@ -535,12 +580,35 @@ def construct_payouts( reward_token = config.reward_config.reward_token_address native_token = Address(config.payment_config.weth_address) price_day = dune.period.end - timedelta(days=1) + exchange_rate_native_to_cow = exchange_rate_atoms( + native_token, reward_token, price_day + ) converter = TokenConversion( - eth_to_token=lambda t: exchange_rate_atoms( - native_token, reward_token, price_day - ) - * t, + eth_to_token=lambda t: exchange_rate_native_to_cow * t, + ) + + solver_info = compute_solver_info( + dune.period.start, + reward_target_df, + service_fee_df, + config, ) + rewards = compute_rewards( + batch_rewards_df, + quote_rewards_df, + exchange_rate_native_to_cow, + config.reward_config, + ) + protocol_fees = compute_protocol_fees( + batch_data, + ) + buffer_accounting = compute_buffer_accounting(batch_data, slippage_df) + + solver_payouts = construct_solver_payouts_dataframe( + solver_info, rewards, protocol_fees, buffer_accounting + ) + + partner_payouts = compute_partner_fees(batch_data, config.protocol_fee_config) complete_payout_df = construct_payout_dataframe( # Fetch and extend auction data from orderbook. 
From 5b5e6d49708bb7ed6466e274b5d54ecc7549a8a0 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Tue, 19 Nov 2024 11:58:40 +0100 Subject: [PATCH 03/13] create transfers from new dataframes --- src/fetch/payouts.py | 109 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 7e676977..c8ac0a97 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -61,6 +61,17 @@ "protocol_fee_eth", ] +SOLVER_PAYOUTS_COLUMNS = [ + "solver", + "primary_reward_eth", + "primary_reward_cow", + "quote_reward_cow", + "protocol_fee_eth", + "network_fee_eth", + "slippage_eth", +] +PARTNER_PAYOUTS_COLUMNS = ["partner", "partner_fee_eth", "partner_fee_tax"] + @dataclass class PeriodPayouts: @@ -137,6 +148,36 @@ def from_series(cls, frame: Series) -> RewardAndPenaltyDatum: reward_token_address=Address(frame["reward_token_address"]), ) + @classmethod + def from_series_new(cls, frame: Series) -> RewardAndPenaltyDatum: + """Constructor from row in Dataframe""" + slippage = int(frame["slippage_eth"]) + int(frame["network_fee_eth"]) + solver = frame["solver"] + reward_target = frame["reward_target"] + if reward_target is None: + logging.warning(f"Solver {solver} without reward_target. Using solver") + reward_target = solver + + buffer_accounting_target = frame["buffer_accounting_target"] + if buffer_accounting_target is None: + logging.warning( + f"Solver {solver} without buffer_accounting_target. Using solver" + ) + buffer_accounting_target = solver + + return cls( + solver=Address(solver), + solver_name=frame["solver_name"], + reward_target=Address(reward_target), + buffer_accounting_target=Address(buffer_accounting_target), + slippage_eth=slippage, + primary_reward_eth=int(frame["primary_reward_eth"]), + primary_reward_cow=int(frame["primary_reward_cow"]), + quote_reward_cow=int(frame["quote_reward_cow"]), + service_fee=Fraction(frame["service_fee"]), + reward_token_address=Address(frame["reward_token_address"]), + ) + def total_outgoing_eth(self) -> int: """Total outgoing amount (including slippage) for the payout.""" return self.total_eth_reward() + self.slippage_eth @@ -305,6 +346,74 @@ def extend_payment_df( return pdf +def prepare_transfers_new( + solver_payouts: DataFrame, + partner_payouts: DataFrame, + period: AccountingPeriod, + config: AccountingConfig, +) -> PeriodPayouts: + """Create transfers from payout data.""" + + assert set(SOLVER_PAYOUTS_COLUMNS).issubset(set(solver_payouts.columns)) + assert set(PARTNER_PAYOUTS_COLUMNS).issubset(set(partner_payouts.columns)) + + overdrafts: list[Overdraft] = [] + transfers: list[Transfer] = [] + for _, payment in solver_payouts.iterrows(): + payout_datum = RewardAndPenaltyDatum.from_series_new(payment) + if payout_datum.is_overdraft(): + overdraft = Overdraft( + period=period, + account=payout_datum.solver, + name=payout_datum.solver_name, + wei=-int(payout_datum.total_outgoing_eth()), + ) + print(f"Solver Overdraft! 
{overdraft}") + overdrafts.append(overdraft) + transfers += payout_datum.as_payouts() + + total_protocol_fee = int(solver_payouts["protocol_fee_eth"].sum()) + total_partner_fee = int(partner_payouts["partner_fee_eth"].sum()) + total_partner_fee_taxed = sum( + int(row["partner_fee_eth"] * (1 - row["partner_fee_tax"])) + for _, row in partner_payouts.iterrows() + ) + total_partner_fee_tax = total_partner_fee - total_partner_fee_taxed + + net_protocol_fee = total_protocol_fee - total_partner_fee + + if net_protocol_fee > 0: + transfers.append( + Transfer( + token=None, + recipient=config.protocol_fee_config.protocol_fee_safe, + amount_wei=net_protocol_fee, + ) + ) + if total_partner_fee_tax > 0: + transfers.append( + Transfer( + token=None, + recipient=config.protocol_fee_config.protocol_fee_safe, + amount_wei=total_partner_fee_tax, + ) + ) + for _, row in partner_payouts.iterrows(): + partner = row["partner"] + partner_fee = int(row["partner_fee_eth"] * (1 - row["partner_fee_tax"])) + assert partner_fee >= 0, f"Can't construct negative transfer of {partner_fee}" + if partner_fee > 0: + transfers.append( + Transfer( + token=None, + recipient=Address(partner), + amount_wei=partner_fee, + ) + ) + + return PeriodPayouts(overdrafts, transfers) + + def prepare_transfers( # pylint: disable=too-many-arguments payout_df: DataFrame, period: AccountingPeriod, From 9151e816edfabd624ff8957f196146a03362ca43 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Tue, 19 Nov 2024 11:40:34 +0100 Subject: [PATCH 04/13] summarize payouts --- src/fetch/payouts.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index c8ac0a97..6f371252 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -636,6 +636,32 @@ def construct_partner_fee_payments( return partner_fees_wei, total_partner_fee_wei_untaxed +def summarize_payments(solver_payouts: DataFrame, partner_payouts: DataFrame): + performance_reward = solver_payouts["primary_reward_cow"].sum() + quote_reward = solver_payouts["quote_reward_cow"].sum() + protocol_fee = solver_payouts["protocol_fee_eth"].sum() + service_fee = sum( + solver_payouts["service_fee"] + * (solver_payouts["primary_reward_cow"] + solver_payouts["quote_reward_cow"]) + ) + partner_fee = partner_payouts["partner_fee_eth"].sum() + partner_fee_taxed = sum( + row["partner_fee_eth"] * (1 - row["partner_fee_tax"]) + for _, row in partner_payouts.iterrows() + ) + partner_fee_tax = partner_fee - partner_fee_taxed + + print( + "Payment breakdown:\n" + f"Performance Reward (before fee): {performance_reward / 10 ** 18:.4f}\n" + f"Quote Reward (before fee): {quote_reward / 10 ** 18:.4f}\n" + f"COW DAO Service Fees: {service_fee / 10 ** 18:.4f}\n", + f"Protocol Fees (before partner fees): {protocol_fee / 10 ** 18:.4f}\n" + f"Partner Fees (before tax): {partner_fee / 10 ** 18:.4f}\n" + f"Partner Fees Tax: {partner_fee_tax / 10 ** 18:.4f}\n", + ) + + def construct_payouts( orderbook: MultiInstanceDBFetcher, dune: DuneFetcher, @@ -719,6 +745,11 @@ def construct_payouts( partner_payouts = compute_partner_fees(batch_data, config.protocol_fee_config) + summarize_payments(solver_payouts, partner_payouts) + payouts_new = prepare_transfers_new( + solver_payouts, partner_payouts, dune.period, config + ) + complete_payout_df = construct_payout_dataframe( # Fetch and extend auction data from orderbook. 
payment_df=extend_payment_df( From 2283a041f58dcaaf1db7dab8f29dc5a9e434441c Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Wed, 20 Nov 2024 16:43:42 +0100 Subject: [PATCH 05/13] start removing old payout code --- src/fetch/payouts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 6f371252..680180ea 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -746,7 +746,7 @@ def construct_payouts( partner_payouts = compute_partner_fees(batch_data, config.protocol_fee_config) summarize_payments(solver_payouts, partner_payouts) - payouts_new = prepare_transfers_new( + payouts = prepare_transfers_new( solver_payouts, partner_payouts, dune.period, config ) @@ -794,7 +794,7 @@ def construct_payouts( f"COW DAO Service Fees: {service_fee / 10 ** 18:.4f}\n", category=Category.TOTALS, ) - payouts = prepare_transfers( + payouts_old = prepare_transfers( complete_payout_df, dune.period, final_protocol_fee_wei, From e9cb6c9c27ca1fe6dc604aa7f1c66a4514edd927 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 12:26:33 +0100 Subject: [PATCH 06/13] remove old code # Conflicts: # src/fetch/payouts.py --- src/fetch/payouts.py | 341 ++++--------------------------------------- 1 file changed, 26 insertions(+), 315 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 680180ea..3ddcc9cb 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -3,14 +3,11 @@ from __future__ import annotations import logging -import math from dataclasses import dataclass -from datetime import timedelta +from datetime import datetime, timedelta from fractions import Fraction from functools import reduce -from typing import Callable -import numpy as np import pandas from dune_client.types import Address from pandas import DataFrame, Series @@ -116,40 +113,6 @@ def __init__( # pylint: disable=too-many-arguments @classmethod def from_series(cls, frame: Series) -> RewardAndPenaltyDatum: - """Constructor from row in Dataframe""" - slippage = ( - int(frame["eth_slippage_wei"]) - if not math.isnan(frame["eth_slippage_wei"]) - else 0 - ) - solver = frame["solver"] - reward_target = frame["reward_target"] - if reward_target is None: - logging.warning(f"Solver {solver} without reward_target. Using solver") - reward_target = solver - - buffer_accounting_target = frame["buffer_accounting_target"] - if buffer_accounting_target is None: - logging.warning( - f"Solver {solver} without buffer_accounting_target. Using solver" - ) - buffer_accounting_target = solver - - return cls( - solver=Address(solver), - solver_name=frame["solver_name"], - reward_target=Address(reward_target), - buffer_accounting_target=Address(buffer_accounting_target), - slippage_eth=slippage, - primary_reward_eth=int(frame["primary_reward_eth"]), - primary_reward_cow=int(frame["primary_reward_cow"]), - quote_reward_cow=int(frame["quote_reward_cow"]), - service_fee=Fraction(frame["service_fee"]), - reward_token_address=Address(frame["reward_token_address"]), - ) - - @classmethod - def from_series_new(cls, frame: Series) -> RewardAndPenaltyDatum: """Constructor from row in Dataframe""" slippage = int(frame["slippage_eth"]) + int(frame["network_fee_eth"]) solver = frame["solver"] @@ -308,45 +271,7 @@ def as_payouts(self) -> list[Transfer]: return result -@dataclass -class TokenConversion: - """ - Data Structure containing token conversion methods. 
- """ - - eth_to_token: Callable - - -def extend_payment_df( - pdf: DataFrame, converter: TokenConversion, config: AccountingConfig -) -> DataFrame: - """ - Extending the basic columns returned by SQL Query with some after-math: - - reward_eth as difference of payment and execution_cost - - reward_cow as conversion from ETH to cow. - """ - # Note that this can be negative! - pdf["primary_reward_cow"] = pdf["primary_reward_eth"].apply(converter.eth_to_token) - - # Pandas has poor support for large integers, must cast the constant to float here, - # otherwise the dtype would be inferred as int64 (which overflows). - pdf["quote_reward_cow"] = ( - float( - min( - config.reward_config.quote_reward_cow, - converter.eth_to_token(config.reward_config.quote_reward_cap_native), - ) - ) - * pdf["num_quotes"] - ) - - for number_col in NUMERICAL_COLUMNS: - pdf[number_col] = pandas.to_numeric(pdf[number_col]) - - return pdf - - -def prepare_transfers_new( +def prepare_transfers( solver_payouts: DataFrame, partner_payouts: DataFrame, period: AccountingPeriod, @@ -360,7 +285,7 @@ def prepare_transfers_new( overdrafts: list[Overdraft] = [] transfers: list[Transfer] = [] for _, payment in solver_payouts.iterrows(): - payout_datum = RewardAndPenaltyDatum.from_series_new(payment) + payout_datum = RewardAndPenaltyDatum.from_series(payment) if payout_datum.is_overdraft(): overdraft = Overdraft( period=period, @@ -414,65 +339,12 @@ def prepare_transfers_new( return PeriodPayouts(overdrafts, transfers) -def prepare_transfers( # pylint: disable=too-many-arguments - payout_df: DataFrame, - period: AccountingPeriod, - final_protocol_fee_wei: int, - partner_fee_tax_wei: int, - partner_fees_wei: dict[str, int], - config: AccountingConfig, -) -> PeriodPayouts: - """ - Manipulates the payout DataFrame to split into ETH and COW. - Specifically, We deduct total_rewards by total_execution_cost (both initially in ETH) - keep the execution cost in ETH and convert the difference to COW. - """ - assert COMPLETE_COLUMNS.issubset(set(payout_df.columns)) - - overdrafts: list[Overdraft] = [] - transfers: list[Transfer] = [] - for _, payment in payout_df.iterrows(): - payout_datum = RewardAndPenaltyDatum.from_series(payment) - if payout_datum.is_overdraft(): - overdraft = Overdraft( - period=period, - account=payout_datum.solver, - name=payout_datum.solver_name, - wei=-int(payout_datum.total_outgoing_eth()), - ) - print(f"Solver Overdraft! 
{overdraft}") - overdrafts.append(overdraft) - transfers += payout_datum.as_payouts() - - if final_protocol_fee_wei > 0: - transfers.append( - Transfer( - token=None, - recipient=config.protocol_fee_config.protocol_fee_safe, - amount_wei=final_protocol_fee_wei, - ) - ) - if partner_fee_tax_wei > 0: - transfers.append( - Transfer( - token=None, - recipient=config.protocol_fee_config.protocol_fee_safe, - amount_wei=partner_fee_tax_wei, - ) - ) - for address in partner_fees_wei: - amount_wei = partner_fees_wei[address] - assert amount_wei >= 0, f"Can't construct negative transfer of {amount_wei}" - if amount_wei > 0: - transfers.append( - Transfer( - token=None, - recipient=Address(address), - amount_wei=amount_wei, - ) - ) - - return PeriodPayouts(overdrafts, transfers) +def fetch_exchange_rate(period_end: datetime, config: AccountingConfig) -> Fraction: + """Fetch exchange rate for converting the native token to COW.""" + reward_token = config.reward_config.reward_token_address + native_token = Address(config.payment_config.weth_address) + price_day = period_end - timedelta(days=1) + return exchange_rate_atoms(native_token, reward_token, price_day) def validate_df_columns( @@ -506,63 +378,13 @@ def normalize_address_field(frame: DataFrame, column_name: str) -> None: frame[column_name] = frame[column_name].str.lower() -def construct_payout_dataframe( - payment_df: DataFrame, - slippage_df: DataFrame, - reward_target_df: DataFrame, - service_fee_df: DataFrame, - config: AccountingConfig, -) -> DataFrame: - """ - Method responsible for joining datasets related to payouts. - Namely, reward targets and slippage (coming from Dune) - with reward and execution data coming from orderbook. - """ - # 1. Assert existence of required columns. - validate_df_columns(payment_df, slippage_df, reward_target_df, service_fee_df) - - # 2. Normalize Join Column (and Ethereum Address Field) - join_column = "solver" - normalize_address_field(payment_df, join_column) - normalize_address_field(slippage_df, join_column) - normalize_address_field(reward_target_df, join_column) - normalize_address_field(service_fee_df, join_column) - - # 3. Merge the three dataframes (joining on solver) - merged_df = ( - payment_df.merge(slippage_df, on=join_column, how="left") - .merge(reward_target_df, on=join_column, how="left") - .merge(service_fee_df, on=join_column, how="left") - ) - - # 4. Add slippage from fees to slippage - merged_df["eth_slippage_wei"] = ( - merged_df["eth_slippage_wei"].fillna(0) + merged_df["network_fee_eth"] - ) - - # 5. Compute buffer accounting target - merged_df["buffer_accounting_target"] = np.where( - merged_df["pool_address"] != config.reward_config.cow_bonding_pool.address, - merged_df["solver"], - merged_df["reward_target"], - ) - - # 6. Add reward token address - merged_df["reward_token_address"] = ( - config.reward_config.reward_token_address.address - ) - - merged_df["service_fee"] = merged_df["service_fee"].fillna(Fraction(0, 1)) # type: ignore - - return merged_df - - -def construct_solver_payouts_dataframe( +def compute_solver_payouts( solver_info: DataFrame, rewards: DataFrame, protocol_fees: DataFrame, buffer_accounting: DataFrame, ) -> DataFrame: + """Combines solver accounting data into one payment dataframe.""" # 1. Validate data # 2. 
Normalize Join Column (and Ethereum Address Field) @@ -596,46 +418,6 @@ def construct_solver_payouts_dataframe( return solver_payouts -def construct_partner_fee_payments( - partner_fees_df: DataFrame, config: AccountingConfig -) -> tuple[dict[str, int], int]: - """Compute actual partner fee payments taking partner fee tax into account - The result is a tuple. The first entry is a dictionary that contains the destination address of - a partner as a key, and the value is the amount in wei to be transferred to that address, stored - as an int. The second entry is the total amount of partner fees charged. - """ - - partner_fees_wei: dict[str, int] = {} - for _, row in partner_fees_df.iterrows(): - if row["partner_list"] is None: - continue - - # We assume the two lists used below, i.e., - # partner_list and partner_fee_eth, - # are "aligned". - - for i in range(len(row["partner_list"])): - address = row["partner_list"][i] - if address in partner_fees_wei: - partner_fees_wei[address] += int(row["partner_fee_eth"][i]) - else: - partner_fees_wei[address] = int(row["partner_fee_eth"][i]) - total_partner_fee_wei_untaxed = 0 - total_partner_fee_wei_taxed = 0 - for address, value in partner_fees_wei.items(): - total_partner_fee_wei_untaxed += value - if address == config.protocol_fee_config.reduced_cut_address: - reduction_factor = 1 - config.protocol_fee_config.partner_fee_reduced_cut - partner_fees_wei[address] = int(reduction_factor * value) - total_partner_fee_wei_taxed += int(reduction_factor * value) - else: - reduction_factor = 1 - config.protocol_fee_config.partner_fee_cut - partner_fees_wei[address] = int(reduction_factor * value) - total_partner_fee_wei_taxed += int(reduction_factor * value) - - return partner_fees_wei, total_partner_fee_wei_untaxed - - def summarize_payments(solver_payouts: DataFrame, partner_payouts: DataFrame): performance_reward = solver_payouts["primary_reward_cow"].sum() quote_reward = solver_payouts["quote_reward_cow"].sum() @@ -671,6 +453,8 @@ def construct_payouts( """Workflow of solver reward payout logic post-CIP27""" # pylint: disable-msg=too-many-locals + # fetch data + # TODO: move data fetching into respective files for quote_rewards_df = orderbook.get_quote_rewards(dune.start_block, dune.end_block) batch_data = orderbook.get_solver_rewards( dune.start_block, @@ -678,50 +462,29 @@ def construct_payouts( config.reward_config.batch_reward_cap_upper, config.reward_config.batch_reward_cap_lower, ) - batch_rewards_df = batch_data[ - ["solver", "primary_reward_eth", "protocol_fee_eth", "network_fee_eth"] - ] - partner_fees_df = batch_data[["partner_list", "partner_fee_eth"]] - - assert batch_rewards_df["solver"].is_unique, "solver not unique in batch rewards" - assert quote_rewards_df["solver"].is_unique, "solver not unique in quote rewards" - merged_df = pandas.merge( - quote_rewards_df, batch_rewards_df, on="solver", how="outer" - ).fillna(0) - service_fee_df = pandas.DataFrame(dune.get_service_fee_status()) - service_fee_df["service_fee"] = [ - service_fee_flag * config.reward_config.service_fee_factor - for service_fee_flag in service_fee_df["service_fee"] - ] reward_target_df = pandas.DataFrame(dune.get_vouches()) - # construct slippage df - if ignore_slippage_flag or (not config.buffer_accounting_config.include_slippage): - slippage_df_temp = pandas.merge( - merged_df[["solver"]], - reward_target_df[["solver", "solver_name"]], - on="solver", - how="inner", - ) - slippage_df = slippage_df_temp.assign( - eth_slippage_wei=[0] * slippage_df_temp.shape[0] - ) - 
else: + + # fetch slippage only if configured to do so + # otherwise set to an empty dataframe + if config.buffer_accounting_config.include_slippage and not ignore_slippage_flag: slippage_df = pandas.DataFrame(dune.get_period_slippage()) # TODO - After CIP-20 phased in, adapt query to return `solver` like all the others slippage_df = slippage_df.rename(columns={"solver_address": "solver"}) + else: + slippage_df = DataFrame(columns=["solver", "eth_slippage_wei"]) + # fetch conversion price + exchange_rate_native_to_cow = fetch_exchange_rate(dune.period.end, config) reward_token = config.reward_config.reward_token_address native_token = Address(config.payment_config.weth_address) price_day = dune.period.end - timedelta(days=1) exchange_rate_native_to_cow = exchange_rate_atoms( native_token, reward_token, price_day ) - converter = TokenConversion( - eth_to_token=lambda t: exchange_rate_native_to_cow * t, - ) + # compute individual components of payments solver_info = compute_solver_info( dune.period.start, reward_target_df, @@ -739,69 +502,17 @@ def construct_payouts( ) buffer_accounting = compute_buffer_accounting(batch_data, slippage_df) - solver_payouts = construct_solver_payouts_dataframe( + # combine into solver payouts and partner payouts + solver_payouts = compute_solver_payouts( solver_info, rewards, protocol_fees, buffer_accounting ) - partner_payouts = compute_partner_fees(batch_data, config.protocol_fee_config) summarize_payments(solver_payouts, partner_payouts) - payouts = prepare_transfers_new( - solver_payouts, partner_payouts, dune.period, config - ) - complete_payout_df = construct_payout_dataframe( - # Fetch and extend auction data from orderbook. - payment_df=extend_payment_df( - pdf=merged_df, - # provide token conversion functions (ETH <--> COW) - converter=converter, - config=config, - ), - # Dune: Fetch Solver Slippage & Reward Targets - slippage_df=slippage_df, - reward_target_df=reward_target_df, - service_fee_df=service_fee_df, - config=config, - ) - # Sort by solver before breaking this data frame into Transfer objects. 
- complete_payout_df = complete_payout_df.sort_values("solver") - - # compute partner fees - partner_fees_wei, total_partner_fee_wei_untaxed = construct_partner_fee_payments( - partner_fees_df, config - ) - raw_protocol_fee_wei = int(complete_payout_df.protocol_fee_eth.sum()) - final_protocol_fee_wei = raw_protocol_fee_wei - total_partner_fee_wei_untaxed - total_partner_fee_wei_taxed = sum(partner_fees_wei.values()) - partner_fee_tax_wei = total_partner_fee_wei_untaxed - total_partner_fee_wei_taxed + # create transfers and overdrafts + payouts = prepare_transfers(solver_payouts, partner_payouts, dune.period, config) - performance_reward = complete_payout_df["primary_reward_cow"].sum() - quote_reward = complete_payout_df["quote_reward_cow"].sum() - - service_fee = sum( - RewardAndPenaltyDatum.from_series(payment).total_service_fee() - for _, payment in complete_payout_df.iterrows() - ) - - log_saver.print( - "Payment breakdown (ignoring service fees):\n" - f"Performance Reward: {performance_reward / 10 ** 18:.4f}\n" - f"Quote Reward: {quote_reward / 10 ** 18:.4f}\n" - f"Protocol Fees: {final_protocol_fee_wei / 10 ** 18:.4f}\n" - f"Partner Fees Tax: {partner_fee_tax_wei / 10 ** 18:.4f}\n" - f"Partner Fees: {total_partner_fee_wei_taxed / 10 ** 18:.4f}\n" - f"COW DAO Service Fees: {service_fee / 10 ** 18:.4f}\n", - category=Category.TOTALS, - ) - payouts_old = prepare_transfers( - complete_payout_df, - dune.period, - final_protocol_fee_wei, - partner_fee_tax_wei, - partner_fees_wei, - config, - ) for overdraft in payouts.overdrafts: log_saver.print(str(overdraft), Category.OVERDRAFT) return payouts.transfers From 0bfae7d0a233e1aaddb98cc0805d82bbc8e0febb Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 11:32:40 +0100 Subject: [PATCH 07/13] rename argument to solver info function --- src/fetch/payouts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 3ddcc9cb..6521d0ac 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -492,7 +492,7 @@ def construct_payouts( config, ) rewards = compute_rewards( - batch_rewards_df, + batch_data, quote_rewards_df, exchange_rate_native_to_cow, config.reward_config, From 62baef7f418d425bbacf2222f262208d05b979fa Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 11:33:05 +0100 Subject: [PATCH 08/13] validate new dataframes --- src/fetch/payouts.py | 48 +++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 6521d0ac..7035f916 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -15,11 +15,14 @@ from src.config import AccountingConfig from src.fetch.dune import DuneFetcher from src.fetch.prices import exchange_rate_atoms -from src.fetch.solver_info import compute_solver_info -from src.fetch.rewards import compute_rewards -from src.fetch.protocol_fees import compute_protocol_fees +from src.fetch.solver_info import SOLVER_INFO_COLUMNS, compute_solver_info +from src.fetch.rewards import REWARDS_COLUMNS, compute_rewards +from src.fetch.protocol_fees import PROTOCOL_FEES_COLUMNS, compute_protocol_fees from src.fetch.partner_fees import compute_partner_fees -from src.fetch.buffer_accounting import compute_buffer_accounting +from src.fetch.buffer_accounting import ( + BUFFER_ACCOUNTING_COLUMNS, + compute_buffer_accounting, +) from src.logger import log_saver from src.models.accounting_period import AccountingPeriod from src.models.overdraft import 
Overdraft @@ -348,29 +351,27 @@ def fetch_exchange_rate(period_end: datetime, config: AccountingConfig) -> Fract def validate_df_columns( - payment_df: DataFrame, - slippage_df: DataFrame, - reward_target_df: DataFrame, - service_fee_df: DataFrame, + solver_info: DataFrame, + rewards: DataFrame, + protocol_fees: DataFrame, + buffer_accounting: DataFrame, ) -> None: - """ + """Validate data frame columns. Since we are working with dataframes rather than concrete objects, we validate that the expected columns/fields are available within our datasets. - While it is ok for the input data to contain more columns, - this method merely validates that the expected ones are there. """ - assert PAYMENT_COLUMNS.issubset( - set(payment_df.columns) - ), f"Payment validation failed with columns: {set(payment_df.columns)}" - assert SLIPPAGE_COLUMNS.issubset( - set(slippage_df.columns) - ), f"Slippage validation Failed with columns: {set(slippage_df.columns)}" - assert REWARD_TARGET_COLUMNS.issubset( - set(reward_target_df.columns) - ), f"Reward Target validation Failed with columns: {set(reward_target_df.columns)}" - assert SERVICE_FEE_COLUMNS.issubset( - set(service_fee_df.columns) - ), f"Service Fee validation Failed with columns: {set(service_fee_df.columns)}" + assert set(solver_info.columns) == set( + SOLVER_INFO_COLUMNS + ), f"Solver info validation failed with columns: {set(solver_info.columns)}" + assert set(rewards.columns) == set( + REWARDS_COLUMNS + ), f"Rewards validation failed with columns: {set(rewards.columns)}" + assert set(protocol_fees.columns) == set( + PROTOCOL_FEES_COLUMNS + ), f"Protocol fee validation failed with columns: {set(protocol_fees.columns)}" + assert set(buffer_accounting.columns) == set( + BUFFER_ACCOUNTING_COLUMNS + ), f"Buffer accounting validation failed with columns: {set(buffer_accounting.columns)}" def normalize_address_field(frame: DataFrame, column_name: str) -> None: @@ -386,6 +387,7 @@ def compute_solver_payouts( ) -> DataFrame: """Combines solver accounting data into one payment dataframe.""" # 1. Validate data + validate_df_columns(solver_info, rewards, protocol_fees, buffer_accounting) # 2. 
Normalize Join Column (and Ethereum Address Field) join_column = "solver" From 1537eefd1a14d4e6f4c2ac642a2554c7b0cf82d2 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 13:32:02 +0100 Subject: [PATCH 09/13] remove unused argument --- src/fetch/payouts.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 7035f916..ed3f5875 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -488,7 +488,6 @@ def construct_payouts( # compute individual components of payments solver_info = compute_solver_info( - dune.period.start, reward_target_df, service_fee_df, config, From d9714990d277dbc6a6a16919edd29525f9929286 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 13:32:11 +0100 Subject: [PATCH 10/13] fix lint errors --- src/fetch/payouts.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index ed3f5875..014b3850 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -274,7 +274,7 @@ def as_payouts(self) -> list[Transfer]: return result -def prepare_transfers( +def prepare_transfers( # pylint: disable=too-many-locals solver_payouts: DataFrame, partner_payouts: DataFrame, period: AccountingPeriod, @@ -415,12 +415,15 @@ def compute_solver_payouts( solver_payouts["slippage_eth"] = solver_payouts["slippage_eth"].fillna(0) solver_payouts["protocol_fee_eth"] = solver_payouts["protocol_fee_eth"].fillna(0) solver_payouts["network_fee_eth"] = solver_payouts["network_fee_eth"].fillna(0) - solver_payouts["service_fee"] = solver_payouts["service_fee"].fillna(Fraction(0, 1)) # type: ignore + solver_payouts["service_fee"] = solver_payouts["service_fee"].fillna( + Fraction(0, 1) # type: ignore + ) return solver_payouts -def summarize_payments(solver_payouts: DataFrame, partner_payouts: DataFrame): +def summarize_payments(solver_payouts: DataFrame, partner_payouts: DataFrame) -> None: + """Summarize payments.""" performance_reward = solver_payouts["primary_reward_cow"].sum() quote_reward = solver_payouts["quote_reward_cow"].sum() protocol_fee = solver_payouts["protocol_fee_eth"].sum() From 9f95bb22b6693f74822fadb85fac5921349a1e17 Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 21 Nov 2024 15:04:16 +0100 Subject: [PATCH 11/13] remove unused constants --- src/fetch/payouts.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 014b3850..d0d7db62 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -32,35 +32,6 @@ from src.utils.print_store import Category -PAYMENT_COLUMNS = { - "solver", - "primary_reward_eth", - "primary_reward_cow", - "quote_reward_cow", - "protocol_fee_eth", - "network_fee_eth", -} -SLIPPAGE_COLUMNS = { - "solver", - "solver_name", - "eth_slippage_wei", -} -REWARD_TARGET_COLUMNS = {"solver", "reward_target", "pool_address"} -SERVICE_FEE_COLUMNS = {"solver", "service_fee"} -ADDITIONAL_PAYMENT_COLUMNS = {"buffer_accounting_target", "reward_token_address"} - -COMPLETE_COLUMNS = ( - PAYMENT_COLUMNS.union(SLIPPAGE_COLUMNS) - .union(REWARD_TARGET_COLUMNS) - .union(ADDITIONAL_PAYMENT_COLUMNS) -) -NUMERICAL_COLUMNS = [ - "primary_reward_eth", - "primary_reward_cow", - "quote_reward_cow", - "protocol_fee_eth", -] - SOLVER_PAYOUTS_COLUMNS = [ "solver", "primary_reward_eth", From 638eb4532216add374618bdccc7b5f47368c9aeb Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 5 Dec 2024 10:28:05 +0100 Subject: [PATCH 12/13] fix merge --- 
src/fetch/payouts.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index 567d9dd6..a41b7db9 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -245,10 +245,6 @@ def as_payouts(self) -> list[Transfer]: return result - pdf["quote_reward_cow"] = ( - min( - config.reward_config.quote_reward_cow, - converter.eth_to_token(config.reward_config.quote_reward_cap_native), def prepare_transfers( # pylint: disable=too-many-locals solver_payouts: DataFrame, partner_payouts: DataFrame, From e2e51c8fe81fcda46fa7ffaa28139597e3e88d0d Mon Sep 17 00:00:00 2001 From: Felix Henneke Date: Thu, 5 Dec 2024 17:33:27 +0100 Subject: [PATCH 13/13] move exchange reates into its own function --- src/fetch/payouts.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/fetch/payouts.py b/src/fetch/payouts.py index a67292d2..69551931 100644 --- a/src/fetch/payouts.py +++ b/src/fetch/payouts.py @@ -313,12 +313,21 @@ def prepare_transfers( # pylint: disable=too-many-locals return PeriodPayouts(overdrafts, transfers) -def fetch_exchange_rate(period_end: datetime, config: AccountingConfig) -> Fraction: +def fetch_exchange_rates( + period_end: datetime, config: AccountingConfig +) -> tuple[Fraction, Fraction]: """Fetch exchange rate for converting the native token to COW.""" reward_token = config.reward_config.reward_token_address - native_token = Address(config.payment_config.weth_address) + native_token = Address(config.payment_config.wrapped_native_token_address) + wrapped_eth = config.payment_config.wrapped_eth_address price_day = period_end - timedelta(days=1) - return exchange_rate_atoms(native_token, reward_token, price_day) + exchange_rate_native_to_cow = exchange_rate_atoms( + native_token, reward_token, price_day + ) + exchange_rate_native_to_eth = exchange_rate_atoms( + native_token, wrapped_eth, price_day + ) + return exchange_rate_native_to_cow, exchange_rate_native_to_eth def validate_df_columns( @@ -464,15 +473,8 @@ def construct_payouts( slippage_df = DataFrame(columns=["solver", "eth_slippage_wei"]) # fetch conversion price - reward_token = config.reward_config.reward_token_address - native_token = Address(config.payment_config.wrapped_native_token_address) - wrapped_eth = config.payment_config.wrapped_eth_address - price_day = dune.period.end - timedelta(days=1) - exchange_rate_native_to_cow = exchange_rate_atoms( - native_token, reward_token, price_day - ) - exchange_rate_native_to_eth = exchange_rate_atoms( - native_token, wrapped_eth, price_day + exchange_rate_native_to_cow, exchange_rate_native_to_eth = fetch_exchange_rates( + dune.period.end, config ) # compute individual components of payments
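
For reference, the quote reward logic in rewards.py caps the per-quote COW reward by a native-token cap converted at the native-to-COW exchange rate, and casts the result to float before multiplying by the number of quotes. A minimal sketch of that calculation with invented numbers (the real values come from RewardConfig and exchange_rate_atoms):

    from fractions import Fraction

    # Invented config values, for illustration only.
    quote_reward_cow = 6 * 10**18         # nominal reward per quote, in COW atoms
    quote_reward_cap_native = 6 * 10**14  # cap per quote, in wei
    exchange_rate = Fraction(5000, 1)     # assumed native-to-COW rate in atoms

    per_quote = min(quote_reward_cow, int(quote_reward_cap_native * exchange_rate))
    num_quotes = 12
    # cast to float before multiplying, as compute_rewards does, so pandas does
    # not infer an overflowing int64 dtype
    quote_reward = float(per_quote) * num_quotes
    print(quote_reward)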
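The partner fee handling spans partner_fees.py (per-partner aggregation and tax rate) and prepare_transfers (the actual split between partners and the protocol fee safe). A self-contained sketch of that split, with made-up addresses, amounts, and an assumed 15% cut (the real cut and reduced cut come from ProtocolFeeConfig):

    from collections import defaultdict

    # Made-up batch rows: aligned lists of partner addresses and fee amounts in wei.
    batch_rows = [
        (["0xpartner_a", "0xpartner_b"], [10**18, 5 * 10**17]),
        (["0xpartner_a"], [2 * 10**18]),
        (None, None),  # batches without partner fees are skipped
    ]
    partner_fee_cut = 0.15  # assumed value for illustration

    fees: defaultdict[str, int] = defaultdict(int)
    for partners, amounts in batch_rows:
        if partners is None:
            continue
        for partner, amount in zip(partners, amounts):
            fees[partner] += int(amount)

    payouts = {partner: int(fee * (1 - partner_fee_cut)) for partner, fee in fees.items()}
    partner_fee_tax = sum(fees.values()) - sum(payouts.values())
    # prepare_transfers sends `payouts` to the partner addresses and the tax
    # (plus protocol fees net of total partner fees) to the protocol fee safe.
    print(payouts, partner_fee_tax)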
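Finally, compute_solver_payouts combines the per-component frames with an outer merge on "solver", so a solver that only appears in one component still gets a row, and the gaps are filled with zeros before transfers are built. A toy version of that merge pattern, with invented solver addresses and amounts:

    from functools import reduce

    import pandas as pd

    # Toy stand-ins for the rewards, protocol fee, and buffer accounting frames.
    rewards = pd.DataFrame(
        {"solver": ["0xsolver_1", "0xsolver_2"], "primary_reward_eth": [10**18, 2 * 10**18]}
    )
    protocol_fees = pd.DataFrame({"solver": ["0xsolver_1"], "protocol_fee_eth": [3 * 10**17]})
    buffer_accounting = pd.DataFrame(
        {"solver": ["0xsolver_2"], "network_fee_eth": [10**16], "slippage_eth": [-5 * 10**15]}
    )

    merged = reduce(
        lambda left, right: left.merge(
            right, how="outer", on="solver", validate="one_to_one", sort=True
        ),
        [rewards, protocol_fees, buffer_accounting],
    )
    # NaNs mark components a solver did not contribute to; the patch fills them
    # with 0 (and Fraction(0, 1) for service_fee) before building transfers.
    print(merged.fillna(0))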