Merge pull request #50 from gizatechxyz/feat/load_contract_w_abi
Feat/load contract w abi
Gonmeso authored May 29, 2024
2 parents 8e8c675 + ef7121a commit 8e5fb6a
Showing 6 changed files with 116 additions and 88 deletions.
18 changes: 10 additions & 8 deletions examples/agents/action_agent.py
@@ -7,18 +7,19 @@

# Process the image
def process_image(img):
img = np.resize(img, (28,28))
img = img.reshape(1,1,28,28)
img = img/255.0
img = np.resize(img, (28, 28))
img = img.reshape(1, 1, 28, 28)
img = img / 255.0
print(img.shape)
# For now, we will just use a small tensor as input to a single-layer softmax. We will change this when the PoC works
tensor = np.random.rand(1,3)
tensor = np.random.rand(1, 3)
return tensor



# Get the image
def get_image(path):
with Image.open(path) as img:
img = img.convert('L')
img = img.convert("L")
img = np.array(img)
return img

@@ -38,7 +39,7 @@ def transmission():
id=id,
chain="ethereum:sepolia:geth",
version_id=version,
account=account
account=account,
)

result = agent.predict(input_feed={"image": img}, verifiable=True)
@@ -53,4 +54,5 @@ def transmission():
pprint.pprint(contract_result.__dict__)
logger.info("Finished")

transmission()

transmission()
2 changes: 1 addition & 1 deletion examples/agents/read_contracts.py
@@ -29,7 +29,7 @@
contracts={
"mnist": "0x17807a00bE76716B91d5ba1232dd1647c4414912",
"token": "0xeF7cCAE97ea69F5CdC89e496b0eDa2687C95D93B",
},
},
chain="ethereum:sepolia:geth",
account=ACCOUNT_ALIAS,
)
27 changes: 17 additions & 10 deletions examples/uni_v3_lp/action_agent.py
@@ -21,24 +21,23 @@
VERSION_ID = ... # Update with your version ID


@task(name="Data processing")
@task
def process_data(realized_vol, dec_price_change):
pct_change_sq = (100 * dec_price_change) ** 2
X = np.array([[realized_vol, pct_change_sq]])

return X


# Get image
@task(name="Get volatility and price change data")
@task
def get_data():
# TODO: implement fetching onchain or from some other source
realized_vol = 4.20
dec_price_change = 0.1
return realized_vol, dec_price_change


@task(name="Create a Giza agent for the Volatility prediction model")
@task
def create_agent(
model_id: int, version_id: int, chain: str, contracts: dict, account: str
):
@@ -55,7 +54,7 @@ def create_agent(
return agent


@task(name="Predict the digit in an image.")
@task
def predict(agent: GizaAgent, X: np.ndarray):
"""
Predict the digit in an image.
@@ -70,7 +69,7 @@ def predict(agent: GizaAgent, X: np.ndarray):
return prediction


@task(name="Get the value from the prediction.")
@task
def get_pred_val(prediction: AgentResult):
"""
Get the value from the prediction.
@@ -86,7 +85,7 @@ def get_pred_val(prediction: AgentResult):


# Create Action
@action(log_prints=True)
@action
def transmission(
pred_model_id,
pred_version_id,
@@ -111,9 +110,10 @@ def transmission(
logger.info(f"Input data: {realized_vol}, {dec_price_change}")
X = process_data(realized_vol, dec_price_change)

nft_manager_abi_path = "nft_manager_abi.json"
contracts = {
"nft_manager": nft_manager_address,
"tokenA": tokenA_address,
"nft_manager": [nft_manager_address, nft_manager_abi_path],
"tokenA": [tokenA_address],
"tokenB": tokenB_address,
"pool": pool_address,
}
@@ -147,7 +147,7 @@ def transmission(
curr_tick, predicted_value, tokenA_decimals, tokenB_decimals, pool_fee
)
mint_params = get_mint_params(
user_address, tokenA_amount, tokenB_amount, pool_fee, lower_tick, upper_tick
tokenA_address,
tokenB_address,
user_address,
tokenA_amount,
tokenB_amount,
pool_fee,
lower_tick,
upper_tick,
)
# step 5: mint new position
logger.info("Minting new position...")
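
For readers skimming the diff, here is a minimal sketch of the contract-loading pattern this branch adds, inferred from the contracts dict above: an entry may be a bare address, an [address] list, or an [address, abi_path] pair so the agent uses a local ABI file. The import path, constructor arguments, IDs, addresses, and account alias below are illustrative assumptions, not taken from this commit.

# Hypothetical usage sketch of the new contracts format (all values are placeholders).
from giza.agents import GizaAgent  # import path assumed, not shown in this diff

MODEL_ID = 123    # placeholder model ID
VERSION_ID = 1    # placeholder version ID

agent = GizaAgent(
    id=MODEL_ID,
    version_id=VERSION_ID,
    chain="ethereum:sepolia:geth",
    account="dev",
    contracts={
        # address plus a path to a local ABI JSON file
        "nft_manager": ["0x0000NftManagerPlaceholder", "nft_manager_abi.json"],
        # address only, wrapped in a list; ABI is resolved as before
        "tokenA": ["0x0000TokenAPlaceholder"],
        # bare address is still accepted
        "pool": "0x0000PoolPlaceholder",
    },
)
# The loaded contracts are then used inside the agent's execution context
# (that API is not shown in this diff).

Supplying a local ABI lets the agent interact with contracts whose ABI cannot be fetched automatically (for example, unverified deployments), which appears to be the motivation for the change.
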
87 changes: 45 additions & 42 deletions examples/uni_v3_lp/mint_position.py
@@ -14,6 +14,8 @@

def get_mint_params(
user_address,
token0_address,
token1_address,
amount0,
amount1,
pool_fee,
@@ -25,8 +27,8 @@ def get_mint_params(
if deadline is None:
deadline = int(time.time()) + 60
mint_params = {
"token0": tokenA.address,
"token1": tokenB.address,
"token0": token0_address,
"token1": token1_address,
"fee": pool_fee,
"tickLower": lower_tick,
"tickUpper": upper_tick,
@@ -74,44 +76,45 @@ def close_position(user_address, nft_manager, nft_id):
nft_manager.collect((nft_id, user_address, MAX_UINT_128, MAX_UINT_128))


networks.parse_network_choice(f"ethereum:sepolia:{sepolia_rpc_url}").__enter__()
chain_id = chain.chain_id
if __name__ == "__main__":
networks.parse_network_choice(f"ethereum:sepolia:{sepolia_rpc_url}").__enter__()
chain_id = chain.chain_id

# step 1: set params
tokenA_amount = 1000
tokenB_amount = 1000
pct_dev = 0.1
pool_fee = 3000
# step 2: load contracts
tokenA = Contract(ADDRESSES["UNI"][chain_id])
tokenB = Contract(ADDRESSES["WETH"][chain_id])
nft_manager = Contract(ADDRESSES["NonfungiblePositionManager"][chain_id])
pool_factory = Contract(ADDRESSES["PoolFactory"][chain_id])
pool_address = pool_factory.getPool(tokenA.address, tokenB.address, pool_fee)
pool = Contract(pool_address)
dev = accounts.load("dev")
dev.set_autosign(True, passphrase=dev_passphrase)
user_address = dev.address
with accounts.use_sender("dev"):
# step 3: fetch open positions
positions = get_all_user_positions(nft_manager, user_address)
print(f"Fouund the following open positions: {positions}")
# step 4: close all positions
print("Closing all open positions...")
for nft_id in positions:
close_position(user_address, nft_manager, nft_id)
# step 4: calculate mint params
print("Calculating mint params...")
_, curr_tick, _, _, _, _, _ = pool.slot0()
tokenA_decimals = tokenA.decimals()
tokenB_decimals = tokenB.decimals()
curr_price = tick_to_price(curr_tick, tokenA_decimals, tokenB_decimals)
lower_tick, upper_tick = get_tick_range(
curr_tick, pct_dev, tokenA_decimals, tokenB_decimals, pool_fee
)
mint_params = get_mint_params(
user_address, tokenA_amount, tokenB_amount, pool_fee, lower_tick, upper_tick
)
# step 5: mint new position
print("Minting new position...")
nft_manager.mint(mint_params)
# step 1: set params
tokenA_amount = 1000
tokenB_amount = 1000
pct_dev = 0.1
pool_fee = 3000
# step 2: load contracts
tokenA = Contract(ADDRESSES["UNI"][chain_id])
tokenB = Contract(ADDRESSES["WETH"][chain_id])
nft_manager = Contract(ADDRESSES["NonfungiblePositionManager"][chain_id])
pool_factory = Contract(ADDRESSES["PoolFactory"][chain_id])
pool_address = pool_factory.getPool(tokenA.address, tokenB.address, pool_fee)
pool = Contract(pool_address)
dev = accounts.load("dev")
dev.set_autosign(True, passphrase=dev_passphrase)
user_address = dev.address
with accounts.use_sender("dev"):
# step 3: fetch open positions
positions = get_all_user_positions(nft_manager, user_address)
print(f"Fouund the following open positions: {positions}")
# step 4: close all positions
print("Closing all open positions...")
for nft_id in positions:
close_position(user_address, nft_manager, nft_id)
# step 4: calculate mint params
print("Calculating mint params...")
_, curr_tick, _, _, _, _, _ = pool.slot0()
tokenA_decimals = tokenA.decimals()
tokenB_decimals = tokenB.decimals()
curr_price = tick_to_price(curr_tick, tokenA_decimals, tokenB_decimals)
lower_tick, upper_tick = get_tick_range(
curr_tick, pct_dev, tokenA_decimals, tokenB_decimals, pool_fee
)
mint_params = get_mint_params(
user_address, tokenA_amount, tokenB_amount, pool_fee, lower_tick, upper_tick
)
# step 5: mint new position
print("Minting new position...")
nft_manager.mint(mint_params)
39 changes: 19 additions & 20 deletions examples/verifiable_mnist/deployments/pytorch_mnist_deployment.py
@@ -7,7 +7,7 @@
from torch.utils.data import DataLoader, TensorDataset


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

input_size = 196 # 14x14
hidden_size = 10
@@ -39,30 +39,29 @@ def resize_images(images):
async def prepare_datasets():
print("Prepare dataset ...")

train_dataset = torchvision.datasets.MNIST(
root='./data', train=True, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False)
train_dataset = torchvision.datasets.MNIST(root="./data", train=True, download=True)
test_dataset = torchvision.datasets.MNIST(root="./data", train=False)

x_train = resize_images(train_dataset)
x_test = resize_images(test_dataset)

x_train = torch.tensor(x_train.reshape(-1, 14*14).astype('float32') / 255)
y_train = torch.tensor(
[label for _, label in train_dataset], dtype=torch.long)
x_train = torch.tensor(x_train.reshape(-1, 14 * 14).astype("float32") / 255)
y_train = torch.tensor([label for _, label in train_dataset], dtype=torch.long)

x_test = torch.tensor(x_test.reshape(-1, 14*14).astype('float32') / 255)
y_test = torch.tensor(
[label for _, label in test_dataset], dtype=torch.long)
x_test = torch.tensor(x_test.reshape(-1, 14 * 14).astype("float32") / 255)
y_test = torch.tensor([label for _, label in test_dataset], dtype=torch.long)
return x_train, y_train, x_test, y_test


async def create_data_loaders(x_train, y_train, x_test, y_test):
print("Create data loaders ...")

train_loader = DataLoader(TensorDataset(
x_train, y_train), batch_size=batch_size, shuffle=True)
test_loader = DataLoader(TensorDataset(
x_test, y_test), batch_size=batch_size, shuffle=False)
train_loader = DataLoader(
TensorDataset(x_train, y_train), batch_size=batch_size, shuffle=True
)
test_loader = DataLoader(
TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=False
)
return train_loader, test_loader


@@ -75,7 +74,7 @@ async def train_model(train_loader):

for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device).reshape(-1, 14*14)
images = images.to(device).reshape(-1, 14 * 14)
labels = labels.to(device)

outputs = model(images)
@@ -87,7 +86,8 @@

if (i + 1) % 100 == 0:
print(
f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(train_loader)}], Loss: {loss.item():.4f}')
f"Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(train_loader)}], Loss: {loss.item():.4f}"
)
return model


@@ -96,21 +96,20 @@ async def test_model(model, test_loader):
n_correct = 0
n_samples = 0
for images, labels in test_loader:
images = images.to(device).reshape(-1, 14*14)
images = images.to(device).reshape(-1, 14 * 14)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
n_samples += labels.size(0)
n_correct += (predicted == labels).sum().item()

acc = 100.0 * n_correct / n_samples
print(f'Accuracy of the network on the 10000 test images: {acc} %')
print(f"Accuracy of the network on the 10000 test images: {acc} %")


def execution():
x_train, y_train, x_test, y_test = prepare_datasets()
train_loader, test_loader = create_data_loaders(
x_train, y_train, x_test, y_test)
train_loader, test_loader = create_data_loaders(x_train, y_train, x_test, y_test)
model = train_model(train_loader)
test_model(model, test_loader)

