wrapper_cryogan.py
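# Training wrapper around the CryoGAN model: builds the encoder, generator and
# discriminator, wires up the ground-truth and noise data loaders and the LR
# schedulers, runs the adversarial training loop, and periodically logs losses,
# image grids and the reconstructed volume (.mrc) under the logs/ and figs/ folders.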
import os
import shutil

import mrcfile
import numpy as np
import pytorch3d
import torch
from pytorch3d.transforms import so3_relative_angle
from torch.utils.tensorboard import SummaryWriter

from cryogan import CryoGAN
from saveimage_utils import save_fig_double
from src.dataio import dataloader
from utils import mean_snr_calculator, dict2cuda
from writer_utils import writer_image_add_dict


class CryoganWrapper:
    def __init__(self, config):
        super(CryoganWrapper, self).__init__()
        self.config = config
        self.cryogan = CryoGAN(config)
        self.cryogan.init_encoder()
        self.cryogan.init_gen()
        self.cryogan.init_dis()
        self.cryogan.to(self.config.device)
        self.gt_loader, self.noise_loader = dataloader(config)
        self.init_scheduler(self.cryogan)
        self.init_path()
    def run(self):
        total_epochs = 400
        per_epoch_iteration = len(self.gt_loader) // (self.config.dis_iterations + 1)
        max_iter = total_epochs * per_epoch_iteration
        iteration = -1
        for epoch in range(total_epochs):
            iter_loader = zip(self.gt_loader, self.noise_loader)
            for _ in range(per_epoch_iteration):
                iteration += 1
                for dis_iter in range(self.config.dis_iterations + 1):
                    gt_data, fake_params = next(iter_loader)
                    gt_data = dict2cuda(gt_data)
                    fake_params = dict2cuda(fake_params)
                    # The generator/encoder are only updated on the last of the
                    # (dis_iterations + 1) discriminator passes.
                    train_all = dis_iter == self.config.dis_iterations
                    loss_dict, fake_data, self.writer = self.cryogan.train(
                        gt_data, fake_params, max_iter, iteration, self.writer, train_all)
                if self.config.cryogan:
                    self.scheduler_dis.step()
                    self.scheduler_gen.step()
                    self.scheduler_encoder.step()
                if iteration % 500 == 0:
                    # No reconstruction batch is produced in this loop, so rec_data is None.
                    self.plot_images(gt_data, fake_data, None, iteration)
                    if self.config.cryogan:
                        wass_loss = loss_dict["loss_wass"]
                        print(f"iter: {iteration} loss_wass: {wass_loss}")
                    else:
                        print(f"iter: {iteration}")
        self.writer.close()
    def writer_add(self, loss_dict, iteration):
        for key, value in loss_dict.items():
            self.writer.add_scalar("loss/" + key, value, iteration)
    def plot_images(self, gt_data, fake_data, rec_data, iteration):
        self.writer = writer_image_add_dict(self.writer, gt_data, fake_data, rec_data, iteration)
        if fake_data is not None:
            save_fig_double(gt_data["proj"].cpu().data, fake_data["proj"].detach().cpu().data,
                            self.OUTPUT_PATH, "Proj", iteration=str(iteration).zfill(6),
                            Title1='Real', Title2='Fake_' + str(iteration),
                            doCurrent=True, sameColorbar=False)
            save_fig_double(gt_data["clean"].cpu().data, fake_data["clean"].detach().cpu().data,
                            self.OUTPUT_PATH, "Proj_clean", iteration=str(iteration).zfill(6),
                            Title1='Real_clean', Title2='fake_clean' + str(iteration),
                            doCurrent=True, sameColorbar=False)
        if rec_data is not None:
            with torch.no_grad():
                loss_rec_clean = (rec_data["clean"] - gt_data["clean"]).pow(2).sum() / self.config.batch_size
                self.writer.add_scalar("loss/loss_rec_clean", loss_rec_clean, iteration)
            save_fig_double(gt_data["clean"].cpu().data, rec_data["clean"].detach().cpu().data,
                            self.OUTPUT_PATH, "Proj_rec", iteration=str(iteration).zfill(6),
                            Title1='Real_clean', Title2='rec_clean' + str(iteration),
                            doCurrent=True, sameColorbar=False)
        volume_path = self.OUTPUT_PATH + '/' + str(iteration).zfill(6) + "_volume.mrc"
        with mrcfile.new(volume_path, overwrite=True) as m:
            m.set_data(self.cryogan.gen.projector.vol.detach().cpu().numpy())
        curr_volume_path = self.OUTPUT_PATH + '/current_volume.mrc'
        shutil.copy(volume_path, curr_volume_path)
        torch.save(self.cryogan.encoder, self.OUTPUT_PATH + "/Encoder.pt")
        torch.save(self.cryogan.gen.noise.scalar, self.OUTPUT_PATH + "/scalar.pt")
    def init_path(self):
        for path in ["/logs/", "/figs/"]:
            OUTPUT_PATH = os.getcwd() + path
            if not os.path.exists(OUTPUT_PATH):
                os.mkdir(OUTPUT_PATH)
            OUTPUT_PATH = OUTPUT_PATH + self.config.exp_name
            if not os.path.exists(OUTPUT_PATH):
                os.mkdir(OUTPUT_PATH)
            if "logs" in path:
                self.writer = SummaryWriter(log_dir=OUTPUT_PATH)
            self.OUTPUT_PATH = OUTPUT_PATH
        # After the loop self.OUTPUT_PATH points at the figs/<exp_name> folder.
        shutil.copy(self.config.config_path, self.OUTPUT_PATH)
    def init_scheduler(self, cryogan):
        self.scheduler_dis = torch.optim.lr_scheduler.StepLR(
            cryogan.dis_optim,
            step_size=self.config.scheduler_step_size * self.config.dis_iterations,
            gamma=self.config.scheduler_gamma)
        self.scheduler_gen = torch.optim.lr_scheduler.StepLR(
            cryogan.gen_optim,
            step_size=self.config.scheduler_step_size,
            gamma=self.config.scheduler_gamma)
        self.scheduler_encoder = torch.optim.lr_scheduler.StepLR(
            cryogan.encoder_optim,
            step_size=self.config.scheduler_step_size,
            gamma=self.config.scheduler_gamma)
    # def init_with_gt(self):
    #     if self.config.init_with_gt:
    #         with torch.no_grad():
    #             self.cryoposegan.gen.projector.vol[:, :, :] = self.GT.vol[:, :, :]
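

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original file: it only illustrates the
    # intended call sequence. How `config` is built is an assumption; OmegaConf is
    # used here for illustration only, and the YAML path is hypothetical. Whatever
    # loader is used, `config` must expose the attributes read above (device,
    # cryogan, dis_iterations, batch_size, exp_name, config_path,
    # scheduler_step_size, scheduler_gamma).
    from omegaconf import OmegaConf  # assumed dependency, for illustration only

    config = OmegaConf.load("configs/cryogan.yaml")  # hypothetical config file
    config.config_path = "configs/cryogan.yaml"      # init_path() copies this file
    wrapper = CryoganWrapper(config)
    wrapper.run()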