From c67cce424d3ba5241c4bf4b8e358526970ee5f2a Mon Sep 17 00:00:00 2001 From: Salem Date: Thu, 21 Mar 2024 20:34:17 +0400 Subject: [PATCH 01/13] make explicit what to save during sampling --- src/gfn/containers/trajectories.py | 8 ++- src/gfn/containers/transitions.py | 6 +- src/gfn/gflownet/base.py | 70 ++++++++++++++-------- src/gfn/gflownet/detailed_balance.py | 47 ++++++++++----- src/gfn/gflownet/flow_matching.py | 6 +- src/gfn/gflownet/sub_trajectory_balance.py | 3 +- src/gfn/gflownet/trajectory_balance.py | 24 ++++---- src/gfn/samplers.py | 33 +++++----- 8 files changed, 123 insertions(+), 74 deletions(-) diff --git a/src/gfn/containers/trajectories.py b/src/gfn/containers/trajectories.py index 02da678a..3e72bb3d 100644 --- a/src/gfn/containers/trajectories.py +++ b/src/gfn/containers/trajectories.py @@ -54,7 +54,7 @@ def __init__( is_backward: bool = False, log_rewards: TT["n_trajectories", torch.float] | None = None, log_probs: TT["max_length", "n_trajectories", torch.float] | None = None, - estimator_outputs: torch.Tensor | None = None, + estimator_outputs: TT["batch_shape", "output_dim", torch.float] | None = None, ) -> None: """ Args: @@ -325,7 +325,11 @@ def to_transitions(self) -> Transitions: ], dim=0, ) - log_probs = self.log_probs[~self.actions.is_dummy] + log_probs = ( + self.log_probs[~self.actions.is_dummy] + if self.log_probs is not None and self.log_probs.nelement() > 0 + else None + ) return Transitions( env=self.env, states=states, diff --git a/src/gfn/containers/transitions.py b/src/gfn/containers/transitions.py index 4b15f05e..ef59d2ab 100644 --- a/src/gfn/containers/transitions.py +++ b/src/gfn/containers/transitions.py @@ -186,7 +186,11 @@ def __getitem__(self, index: int | Sequence[int]) -> Transitions: log_rewards = ( self._log_rewards[index] if self._log_rewards is not None else None ) - log_probs = self.log_probs[index] + log_probs = ( + self.log_probs[index] + if self.log_probs is not None and self.log_probs.nelement() > 0 + else None + ) return Transitions( env=self.env, states=states, diff --git a/src/gfn/gflownet/base.py b/src/gfn/gflownet/base.py index e38bb10a..c3d4cdad 100644 --- a/src/gfn/gflownet/base.py +++ b/src/gfn/gflownet/base.py @@ -24,18 +24,25 @@ class GFlowNet(ABC, nn.Module, Generic[TrainingSampleType]): A formal definition of GFlowNets is given in Sec. 3 of [GFlowNet Foundations](https://arxiv.org/pdf/2111.09266). """ + log_reward_clip_min = float("-inf") # Default off. @abstractmethod def sample_trajectories( - self, env: Env, n_samples: int, sample_off_policy: bool + self, + env: Env, + n_samples: int, + save_logprobs: bool = True, + save_estimator_outputs: bool = False, ) -> Trajectories: """Sample a specific number of complete trajectories. Args: env: the environment to sample trajectories from. n_samples: number of trajectories to be sampled. - sample_off_policy: whether to sample trajectories on / off policy. + save_logprobs: whether to save the logprobs of the actions - useful for on-policy learning. + save_estimator_outputs: whether to save the estimator outputs - useful for off-policy learning + with tempered policy Returns: Trajectories: sampled trajectories object. """ @@ -49,7 +56,9 @@ def sample_terminating_states(self, env: Env, n_samples: int) -> States: Returns: States: sampled terminating states object. 
""" - trajectories = self.sample_trajectories(env, n_samples, sample_off_policy=False) + trajectories = self.sample_trajectories( + env, n_samples, save_estimator_outputs=False, save_logprobs=False + ) return trajectories.last_states def logz_named_parameters(self): @@ -75,21 +84,26 @@ class PFBasedGFlowNet(GFlowNet[TrainingSampleType]): pb: GFNModule """ - def __init__(self, pf: GFNModule, pb: GFNModule, off_policy: bool): + def __init__(self, pf: GFNModule, pb: GFNModule): super().__init__() self.pf = pf self.pb = pb - self.off_policy = off_policy def sample_trajectories( - self, env: Env, n_samples: int, sample_off_policy: bool, **policy_kwargs + self, + env: Env, + n_samples: int, + save_logprobs: bool = True, + save_estimator_outputs: bool = False, + **policy_kwargs ) -> Trajectories: """Samples trajectories, optionally with specified policy kwargs.""" sampler = Sampler(estimator=self.pf) trajectories = sampler.sample_trajectories( env, n_trajectories=n_samples, - off_policy=sample_off_policy, + save_estimator_outputs=save_estimator_outputs, + save_logprobs=save_logprobs, **policy_kwargs, ) @@ -107,6 +121,7 @@ def get_pfs_and_pbs( self, trajectories: Trajectories, fill_value: float = 0.0, + recalculate_all: bool = False, ) -> Tuple[ TT["max_length", "n_trajectories", torch.float], TT["max_length", "n_trajectories", torch.float], @@ -116,17 +131,16 @@ def get_pfs_and_pbs( More specifically it evaluates $\log P_F (s' \mid s)$ and $\log P_B(s \mid s')$ for each transition in each trajectory in the batch. - Useful when the policy used to sample the trajectories is different from - the one used to evaluate the loss. Otherwise we can use the logprobs directly - from the trajectories. - - Note - for off policy exploration, the trajectories submitted to this method - will be sampled off policy. + Unless recalculate_all=True, in which case we re-evaluate the logprobs of the trajectories with + the current self.pf. The following applies: + - If trajectories have log_probs attribute, use them - this is usually for on-policy learning + - Else, if trajectories have estimator_outputs attribute, transform them + into log_probs - this is usually for off-policy learning with a tempered policy + - Else, if trajectories have none of them, re-evaluate the log_probs + using the current self.pf - this is usually for off-policy learning with replay buffer Args: trajectories: Trajectories to evaluate. - estimator_outputs: Optional stored estimator outputs from previous forward - sampling (encountered, for example, when sampling off policy). fill_value: Value to use for invalid states (i.e. $s_f$ that is added to shorter trajectories). @@ -150,16 +164,19 @@ def get_pfs_and_pbs( if valid_states.batch_shape != tuple(valid_actions.batch_shape): raise AssertionError("Something wrong happening with log_pf evaluations") - if self.off_policy: - # We re-use the values calculated in .sample_trajectories(). - if trajectories.estimator_outputs is not None: + if ( + trajectories.log_probs is not None + and trajectories.log_probs.nelement() > 0 + and not recalculate_all + ): + log_pf_trajectories = trajectories.log_probs + else: + if trajectories.estimator_outputs is not None and not recalculate_all: estimator_outputs = trajectories.estimator_outputs[ ~trajectories.actions.is_dummy ] else: - raise Exception( - "GFlowNet is off policy, but no estimator_outputs found in Trajectories!" - ) + estimator_outputs = self.pf(valid_states) # Calculates the log PF of the actions sampled off policy. 
valid_log_pf_actions = self.pf.to_probability_distribution( @@ -174,9 +191,6 @@ def get_pfs_and_pbs( ) log_pf_trajectories[~trajectories.actions.is_dummy] = valid_log_pf_actions - else: - log_pf_trajectories = trajectories.log_probs - non_initial_valid_states = valid_states[~valid_states.is_initial_state] non_exit_valid_actions = valid_actions[~valid_actions.is_exit] @@ -201,14 +215,18 @@ def get_pfs_and_pbs( return log_pf_trajectories, log_pb_trajectories def get_trajectories_scores( - self, trajectories: Trajectories + self, + trajectories: Trajectories, + recalculate_all: bool = False, ) -> Tuple[ TT["n_trajectories", torch.float], TT["n_trajectories", torch.float], TT["n_trajectories", torch.float], ]: """Given a batch of trajectories, calculate forward & backward policy scores.""" - log_pf_trajectories, log_pb_trajectories = self.get_pfs_and_pbs(trajectories) + log_pf_trajectories, log_pb_trajectories = self.get_pfs_and_pbs( + trajectories, recalculate_all=recalculate_all + ) assert log_pf_trajectories is not None total_log_pf_trajectories = log_pf_trajectories.sum(dim=0) diff --git a/src/gfn/gflownet/detailed_balance.py b/src/gfn/gflownet/detailed_balance.py index 4cb4e6e2..cba6bfc8 100644 --- a/src/gfn/gflownet/detailed_balance.py +++ b/src/gfn/gflownet/detailed_balance.py @@ -23,7 +23,6 @@ class DBGFlowNet(PFBasedGFlowNet[Transitions]): Attributes: logF: a ScalarEstimator instance. - off_policy: If true, we need to reevaluate the log probs. forward_looking: whether to implement the forward looking GFN loss. log_reward_clip_min: If finite, clips log rewards to this value. """ @@ -33,17 +32,16 @@ def __init__( pf: GFNModule, pb: GFNModule, logF: ScalarEstimator, - off_policy: bool, forward_looking: bool = False, log_reward_clip_min: float = -float("inf"), ): - super().__init__(pf, pb, off_policy=off_policy) + super().__init__(pf, pb) self.logF = logF self.forward_looking = forward_looking self.log_reward_clip_min = log_reward_clip_min def get_scores( - self, env: Env, transitions: Transitions + self, env: Env, transitions: Transitions, recalculate_all: bool = False ) -> Tuple[ TT["n_transitions", float], TT["n_transitions", float], @@ -54,6 +52,12 @@ def get_scores( Args: transitions: a batch of transitions. + Unless recalculate_all=True, in which case we re-evaluate the logprobs of the transitions with + the current self.pf. The following applies: + - If transitions have log_probs attribute, use them - this is usually for on-policy learning + - Else, re-evaluate the log_probs using the current self.pf - this is usually for + off-policy learning with replay buffer + Raises: ValueError: when supplied with backward transitions. AssertionError: when log rewards of transitions are None. @@ -68,19 +72,20 @@ def get_scores( if states.batch_shape != tuple(actions.batch_shape): raise ValueError("Something wrong happening with log_pf evaluations") - if not self.off_policy: + if ( + transitions.log_probs is not None + and transitions.log_probs.nelement() > 0 + and not recalculate_all + ): valid_log_pf_actions = transitions.log_probs else: - # Evaluate the log PF of the actions sampled off policy. - # I suppose the Transitions container should then have some - # estimator_outputs attribute as well, to avoid duplication here ? - # See (#156). - module_output = self.pf(states) # TODO: Inefficient duplication. 
+ # Evaluate the log PF of the actions + module_output = self.pf( + states + ) # TODO: Inefficient duplication in case of tempered policy valid_log_pf_actions = self.pf.to_probability_distribution( states, module_output - ).log_prob( - actions.tensor - ) # Actions sampled off policy. + ).log_prob(actions.tensor) valid_log_F_s = self.logF(states).squeeze(-1) if self.forward_looking: @@ -149,9 +154,17 @@ class ModifiedDBGFlowNet(PFBasedGFlowNet[Transitions]): https://arxiv.org/abs/2202.13903 for more details. """ - def get_scores(self, transitions: Transitions) -> TT["n_trajectories", torch.float]: + def get_scores( + self, transitions: Transitions, recalculate_all: bool = False + ) -> TT["n_trajectories", torch.float]: """DAG-GFN-style detailed balance, when all states are connected to the sink. + Unless recalculate_all=True, in which case we re-evaluate the logprobs of the transitions with + the current self.pf. The following applies: + - If transitions have log_probs attribute, use them - this is usually for on-policy learning + - Else, re-evaluate the log_probs using the current self.pf - this is usually for + off-policy learning with replay buffer + Raises: ValueError: when backward transitions are supplied (not supported). ValueError: when the computed scores contain `inf`. @@ -166,7 +179,11 @@ def get_scores(self, transitions: Transitions) -> TT["n_trajectories", torch.flo all_log_rewards = transitions.all_log_rewards[mask] module_output = self.pf(states) pf_dist = self.pf.to_probability_distribution(states, module_output) - if not self.off_policy: + if ( + transitions.log_probs is not None + and transitions.log_probs.nelement() > 0 + and not recalculate_all + ): valid_log_pf_actions = transitions[mask].log_probs else: # Evaluate the log PF of the actions sampled off policy. diff --git a/src/gfn/gflownet/flow_matching.py b/src/gfn/gflownet/flow_matching.py index 061761d4..5764cb8e 100644 --- a/src/gfn/gflownet/flow_matching.py +++ b/src/gfn/gflownet/flow_matching.py @@ -36,7 +36,8 @@ def __init__(self, logF: DiscretePolicyEstimator, alpha: float = 1.0): def sample_trajectories( self, env: Env, - off_policy: bool, + save_logprobs: bool, + save_estimator_outputs: bool = False, n_samples: int = 1000, **policy_kwargs: Optional[dict], ) -> Trajectories: @@ -49,7 +50,8 @@ def sample_trajectories( trajectories = sampler.sample_trajectories( env, n_trajectories=n_samples, - off_policy=off_policy, + save_estimator_outputs=save_estimator_outputs, + save_logprobs=save_logprobs, **policy_kwargs, ) return trajectories diff --git a/src/gfn/gflownet/sub_trajectory_balance.py b/src/gfn/gflownet/sub_trajectory_balance.py index f07835c3..6e8b1324 100644 --- a/src/gfn/gflownet/sub_trajectory_balance.py +++ b/src/gfn/gflownet/sub_trajectory_balance.py @@ -56,7 +56,6 @@ def __init__( pf: GFNModule, pb: GFNModule, logF: ScalarEstimator, - off_policy: bool, weighting: Literal[ "DB", "ModifiedDB", @@ -70,7 +69,7 @@ def __init__( log_reward_clip_min: float = -float("inf"), forward_looking: bool = False, ): - super().__init__(pf, pb, off_policy=off_policy) + super().__init__(pf, pb) self.logF = logF self.weighting = weighting self.lamda = lamda diff --git a/src/gfn/gflownet/trajectory_balance.py b/src/gfn/gflownet/trajectory_balance.py index dde1b667..45db346c 100644 --- a/src/gfn/gflownet/trajectory_balance.py +++ b/src/gfn/gflownet/trajectory_balance.py @@ -23,7 +23,6 @@ class TBGFlowNet(TrajectoryBasedGFlowNet): the DAG, or a singleton thereof, if self.logit_PB is a fixed DiscretePBEstimator. 
Attributes: - off_policy: Whether the GFlowNet samples trajectories on or off policy. logZ: a LogZEstimator instance. log_reward_clip_min: If finite, clips log rewards to this value. """ @@ -32,18 +31,19 @@ def __init__( self, pf: GFNModule, pb: GFNModule, - off_policy: bool, init_logZ: float = 0.0, log_reward_clip_min: float = -float("inf"), ): - super().__init__(pf, pb, off_policy=off_policy) + super().__init__(pf, pb) self.logZ = nn.Parameter( torch.tensor(init_logZ) ) # TODO: Optionally, this should be a nn.Module to support conditional GFNs. self.log_reward_clip_min = log_reward_clip_min - def loss(self, env: Env, trajectories: Trajectories) -> TT[0, float]: + def loss( + self, env: Env, trajectories: Trajectories, recalculate_all: bool = False + ) -> TT[0, float]: """Trajectory balance loss. The trajectory balance loss is described in 2.3 of @@ -53,7 +53,9 @@ def loss(self, env: Env, trajectories: Trajectories) -> TT[0, float]: ValueError: if the loss is NaN. """ del env # unused - _, _, scores = self.get_trajectories_scores(trajectories) + _, _, scores = self.get_trajectories_scores( + trajectories, recalculate_all=recalculate_all + ) loss = (scores + self.logZ).pow(2).mean() if torch.isnan(loss): raise ValueError("loss is nan") @@ -65,7 +67,6 @@ class LogPartitionVarianceGFlowNet(TrajectoryBasedGFlowNet): """Dataclass which holds the logZ estimate for the Log Partition Variance loss. Attributes: - off_policy: Whether the GFlowNet samples trajectories on or off policy. log_reward_clip_min: If finite, clips log rewards to this value. Raises: @@ -76,16 +77,13 @@ def __init__( self, pf: GFNModule, pb: GFNModule, - off_policy: bool, log_reward_clip_min: float = -float("inf"), ): - super().__init__(pf, pb, off_policy=off_policy) + super().__init__(pf, pb) self.log_reward_clip_min = log_reward_clip_min def loss( - self, - env: Env, - trajectories: Trajectories, + self, env: Env, trajectories: Trajectories, recalculate_all: bool = False ) -> TT[0, float]: """Log Partition Variance loss. @@ -93,7 +91,9 @@ def loss( [ROBUST SCHEDULING WITH GFLOWNETS](https://arxiv.org/abs/2302.05446)) """ del env # unused - _, _, scores = self.get_trajectories_scores(trajectories) + _, _, scores = self.get_trajectories_scores( + trajectories, recalculate_all=recalculate_all + ) loss = (scores - scores.mean()).pow(2).mean() if torch.isnan(loss): raise ValueError("loss is NaN.") diff --git a/src/gfn/samplers.py b/src/gfn/samplers.py index 68b052a6..a486c682 100644 --- a/src/gfn/samplers.py +++ b/src/gfn/samplers.py @@ -31,9 +31,13 @@ def sample_actions( env: Env, states: States, save_estimator_outputs: bool = False, - calculate_logprobs: bool = True, + save_logprobs: bool = True, **policy_kwargs: Optional[dict], - ) -> Tuple[Actions, TT["batch_shape", torch.float]]: + ) -> Tuple[ + Actions, + TT["batch_shape", torch.float] | None, + TT["batch_shape", torch.float] | None, + ]: """Samples actions from the given states. Args: @@ -41,7 +45,7 @@ def sample_actions( env: The environment to sample actions from. states: A batch of states. save_estimator_outputs: If True, the estimator outputs will be returned. - calculate_logprobs: If True, calculates the log probabilities of sampled + save_logprobs: If True, calculates and saves the log probabilities of sampled actions. policy_kwargs: keyword arguments to be passed to the `to_probability_distribution` method of the estimator. 
For example, for @@ -71,7 +75,7 @@ def sample_actions( with torch.no_grad(): actions = dist.sample() - if calculate_logprobs: + if save_logprobs: log_probs = dist.log_prob(actions) if torch.any(torch.isinf(log_probs)): raise RuntimeError("Log probabilities are inf. This should not happen.") @@ -88,29 +92,30 @@ def sample_actions( def sample_trajectories( self, env: Env, - off_policy: bool, states: Optional[States] = None, n_trajectories: Optional[int] = None, - debug_mode: bool = False, + save_estimator_outputs: bool = False, + save_logprobs: bool = True, **policy_kwargs, ) -> Trajectories: """Sample trajectories sequentially. Args: env: The environment to sample trajectories from. - off_policy: If True, samples actions such that we skip log probability - calculation, and we save the estimator outputs for later use. states: If given, trajectories would start from such states. Otherwise, trajectories are sampled from $s_o$ and n_trajectories must be provided. n_trajectories: If given, a batch of n_trajectories will be sampled all starting from the environment's s_0. + save_estimator_outputs: If True, the estimator outputs will be returned. This + is useful for off-policy training with tempered policy. + save_logprobs: If True, calculates and saves the log probabilities of sampled + actions. This is useful for on-policy training. policy_kwargs: keyword arguments to be passed to the `to_probability_distribution` method of the estimator. For example, for DiscretePolicyEstimators, the kwargs can contain the `temperature` parameter, `epsilon`, and `sf_bias`. In the continuous case these kwargs will be user defined. This can be used to, for example, sample off-policy. - debug_mode: if True, everything gets calculated. Returns: A Trajectories object representing the batch of sampled trajectories. @@ -118,8 +123,6 @@ def sample_trajectories( AssertionError: When both states and n_trajectories are specified. AssertionError: When states are not linear. """ - save_estimator_outputs = off_policy or debug_mode - skip_logprob_calculaion = off_policy and not debug_mode if states is None: assert ( @@ -168,7 +171,7 @@ def sample_trajectories( env, states[~dones], save_estimator_outputs=True if save_estimator_outputs else False, - calculate_logprobs=False if skip_logprob_calculaion else True, + save_logprobs=save_logprobs, **policy_kwargs, ) if estimator_outputs is not None: @@ -184,7 +187,7 @@ def sample_trajectories( all_estimator_outputs.append(estimator_outputs_padded) actions[~dones] = valid_actions - if not skip_logprob_calculaion: + if save_logprobs: # When off_policy, actions_log_probs are None. log_probs[~dones] = actions_log_probs trajectories_actions += [actions] @@ -224,7 +227,9 @@ def sample_trajectories( trajectories_states = torch.stack(trajectories_states, dim=0) trajectories_states = env.states_from_tensor(trajectories_states) trajectories_actions = env.Actions.stack(trajectories_actions) - trajectories_logprobs = torch.stack(trajectories_logprobs, dim=0) + trajectories_logprobs = ( + torch.stack(trajectories_logprobs, dim=0) if save_logprobs else None + ) # TODO: use torch.nested.nested_tensor(dtype, device, requires_grad). 
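        # By this point the batch is assembled: trajectories_logprobs is a
        # stacked tensor only when save_logprobs=True (None otherwise), and
        # the padded estimator outputs are attached below only when
        # save_estimator_outputs=True.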
if save_estimator_outputs: From 462c8c91d58718d15958f23646ee9bcaf9121364 Mon Sep 17 00:00:00 2001 From: Salem Date: Thu, 21 Mar 2024 20:34:34 +0400 Subject: [PATCH 02/13] fix flow matching + replay buffer --- src/gfn/containers/replay_buffer.py | 2 ++ src/gfn/states.py | 20 ++++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/gfn/containers/replay_buffer.py b/src/gfn/containers/replay_buffer.py index eebf24db..880314ea 100644 --- a/src/gfn/containers/replay_buffer.py +++ b/src/gfn/containers/replay_buffer.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +import torch from typing import TYPE_CHECKING, Literal from gfn.containers.trajectories import Trajectories @@ -48,6 +49,7 @@ def __init__( elif objects_type == "states": self.training_objects = env.states_from_batch_shape((0,)) self.terminating_states = env.states_from_batch_shape((0,)) + self.terminating_states.log_rewards = torch.zeros((0,), device=env.device) self.objects_type = "states" else: raise ValueError(f"Unknown objects_type: {objects_type}") diff --git a/src/gfn/states.py b/src/gfn/states.py index 53492861..38323abe 100644 --- a/src/gfn/states.py +++ b/src/gfn/states.py @@ -128,9 +128,12 @@ def device(self) -> torch.device: def __getitem__(self, index: int | Sequence[int] | Sequence[bool]) -> States: """Access particular states of the batch.""" - return self.__class__( + out = self.__class__( self.tensor[index] ) # TODO: Inefficient - this might make a copy of the tensor! + if self._log_rewards is not None: + out.log_rewards = self._log_rewards[index] + return out def __setitem__( self, index: int | Sequence[int] | Sequence[bool], states: States @@ -168,6 +171,11 @@ def extend(self, other: States) -> None: # This corresponds to adding a state to a trajectory self.batch_shape = (self.batch_shape[0] + other_batch_shape[0],) self.tensor = torch.cat((self.tensor, other.tensor), dim=0) + if self._log_rewards is not None: + assert other._log_rewards is not None + self._log_rewards = torch.cat( + (self._log_rewards, other._log_rewards), dim=0 + ) elif len(other_batch_shape) == len(self.batch_shape) == 2: # This corresponds to adding a trajectory to a batch of trajectories @@ -258,6 +266,10 @@ def log_rewards(self) -> TT["batch_shape", torch.float]: def log_rewards(self, log_rewards: TT["batch_shape", torch.float]) -> None: self._log_rewards = log_rewards + def sample(self, n_samples: int) -> States: + """Samples a subset of the States object.""" + return self[torch.randperm(len(self))[:n_samples]] + class DiscreteStates(States, ABC): """Base class for states of discrete environments. 
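For context, a minimal usage sketch of the log-rewards bookkeeping introduced in this patch. HyperGrid is used for concreteness (any discrete Env exposes the same hooks), and the shapes are illustrative:

    import torch

    from gfn.gym import HyperGrid

    env = HyperGrid(ndim=2, height=8)
    states = env.reset(batch_shape=8, random=True)
    states.log_rewards = torch.zeros(8, device=env.device)  # attach log-rewards

    subset = states[[0, 2, 4]]   # indexing now carries log_rewards along
    assert subset.log_rewards.shape == (3,)

    buffer = env.states_from_batch_shape((0,))
    buffer.log_rewards = torch.zeros((0,), device=env.device)
    buffer.extend(states)        # extend concatenates log_rewards too

    picked = states.sample(4)    # new helper: random subset of the batch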
@@ -340,7 +352,11 @@ def __getitem__( self._check_both_forward_backward_masks_exist() forward_masks = self.forward_masks[index] backward_masks = self.backward_masks[index] - return self.__class__(states, forward_masks, backward_masks) + out = self.__class__(states, forward_masks, backward_masks) + if self.log_rewards is not None: + log_probs = self._log_rewards[index] + out.log_rewards = log_probs + return out def __setitem__( self, index: int | Sequence[int] | Sequence[bool], states: DiscreteStates From 32ba79be1123e12a7e7394e24a50ef9e3bf0189f Mon Sep 17 00:00:00 2001 From: Salem Date: Thu, 21 Mar 2024 20:34:43 +0400 Subject: [PATCH 03/13] fix tests and scripts and tutorials --- testing/test_gflownet.py | 12 ++++--- testing/test_parametrizations_and_losses.py | 36 +++++++++---------- testing/test_samplers_and_trajectories.py | 27 +++++++------- tutorials/examples/train_box.py | 15 ++++---- tutorials/examples/train_discreteebm.py | 3 +- tutorials/examples/train_hypergrid.py | 14 ++++---- tutorials/examples/train_hypergrid_simple.py | 5 +-- tutorials/examples/train_line.py | 4 +-- .../notebooks/intro_gfn_continuous_line.ipynb | 28 ++++++--------- tutorials/notebooks/intro_gfn_smiley.ipynb | 1 - 10 files changed, 70 insertions(+), 75 deletions(-) diff --git a/testing/test_gflownet.py b/testing/test_gflownet.py index 35642020..718840bc 100644 --- a/testing/test_gflownet.py +++ b/testing/test_gflownet.py @@ -27,7 +27,7 @@ def test_trajectory_based_gflownet_generic(): ) pb_estimator = BoxPBEstimator(env=env, module=pb_module, n_components=1) - gflownet = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=False) + gflownet = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) mock_trajectories = Trajectories(env) result = gflownet.to_training_samples(mock_trajectories) @@ -46,7 +46,9 @@ def test_flow_matching_gflownet_generic(): module = BoxPFNeuralNet( hidden_dim=32, n_hidden_layers=2, n_components=1, n_components_s0=1 ) - estimator = DiscretePolicyEstimator(env, module, True) + estimator = DiscretePolicyEstimator( + module, n_actions=2, preprocessor=env.preprocessor + ) gflownet = FMGFlowNet(estimator) mock_trajectories = Trajectories(env) states_tuple = gflownet.to_training_samples(mock_trajectories) @@ -79,7 +81,7 @@ def test_pytorch_inheritance(): ) pb_estimator = BoxPBEstimator(env=env, module=pb_module, n_components=1) - tbgflownet = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=False) + tbgflownet = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) assert hasattr( tbgflownet.parameters(), "__iter__" ), "Expected gflownet to have iterable parameters() method inherited from nn.Module" @@ -87,7 +89,9 @@ def test_pytorch_inheritance(): tbgflownet.state_dict(), "__dict__" ), "Expected gflownet to have indexable state_dict() method inherited from nn.Module" - estimator = DiscretePolicyEstimator(env, pf_module, True) + estimator = DiscretePolicyEstimator( + pf_module, n_actions=2, preprocessor=env.preprocessor + ) fmgflownet = FMGFlowNet(estimator) assert hasattr( fmgflownet.parameters(), "__iter__" diff --git a/testing/test_parametrizations_and_losses.py b/testing/test_parametrizations_and_losses.py index f2e725bf..5904ddd4 100644 --- a/testing/test_parametrizations_and_losses.py +++ b/testing/test_parametrizations_and_losses.py @@ -57,7 +57,7 @@ def test_FM(env_name: int, ndim: int, module_name: str): ) gflownet = FMGFlowNet(log_F_edge) # forward looking by default. 
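    # Note: the FM loss itself does not consume per-action log-probs;
    # save_logprobs=True below simply exercises the default on-policy path.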
- trajectories = gflownet.sample_trajectories(env, off_policy=False, n_samples=10) + trajectories = gflownet.sample_trajectories(env, save_logprobs=True, n_samples=10) states_tuple = trajectories.to_non_initial_intermediary_and_terminating_states() loss = gflownet.loss(env, states_tuple) assert loss >= 0 @@ -71,11 +71,13 @@ def test_get_pfs_and_pbs(env_name: str, preprocessor_name: str): trajectories, _, pf_estimator, pb_estimator = trajectory_sampling_with_return( env_name, preprocessor_name, delta=0.1, n_components=1, n_components_s0=1 ) - gflownet_on = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=False) - gflownet_off = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=True) + gflownet_on = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) + gflownet_off = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) log_pfs_on, log_pbs_on = gflownet_on.get_pfs_and_pbs(trajectories) - log_pfs_off, log_pbs_off = gflownet_off.get_pfs_and_pbs(trajectories) + log_pfs_off, log_pbs_off = gflownet_off.get_pfs_and_pbs( + trajectories, recalculate_all=True + ) @pytest.mark.parametrize("preprocessor_name", ["Identity", "KHot"]) @@ -86,10 +88,12 @@ def test_get_scores(env_name: str, preprocessor_name: str): trajectories, _, pf_estimator, pb_estimator = trajectory_sampling_with_return( env_name, preprocessor_name, delta=0.1, n_components=1, n_components_s0=1 ) - gflownet_on = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=False) - gflownet_off = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, off_policy=True) + gflownet_on = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) + gflownet_off = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) scores_on = gflownet_on.get_trajectories_scores(trajectories) - scores_off = gflownet_off.get_trajectories_scores(trajectories) + scores_off = gflownet_off.get_trajectories_scores( + trajectories, recalculate_all=True + ) assert all( [ torch.all(torch.abs(scores_on[i] - scores_off[i]) < 1e-4) @@ -189,28 +193,24 @@ def PFBasedGFlowNet_with_return( forward_looking=forward_looking, pf=pf, pb=pb, - off_policy=False, ) elif gflownet_name == "ModifiedDB": - gflownet = ModifiedDBGFlowNet(pf=pf, pb=pb, off_policy=False) + gflownet = ModifiedDBGFlowNet(pf=pf, pb=pb) elif gflownet_name == "TB": - gflownet = TBGFlowNet(pf=pf, pb=pb, off_policy=False) + gflownet = TBGFlowNet(pf=pf, pb=pb) elif gflownet_name == "ZVar": - gflownet = LogPartitionVarianceGFlowNet(pf=pf, pb=pb, off_policy=False) + gflownet = LogPartitionVarianceGFlowNet(pf=pf, pb=pb) elif gflownet_name == "SubTB": gflownet = SubTBGFlowNet( logF=logF, weighting=sub_tb_weighting, pf=pf, pb=pb, - off_policy=False, ) else: raise ValueError(f"Unknown gflownet {gflownet_name}") - trajectories = gflownet.sample_trajectories( - env, sample_off_policy=False, n_samples=10 - ) + trajectories = gflownet.sample_trajectories(env, save_logprobs=True, n_samples=10) training_objects = gflownet.to_training_samples(trajectories) _ = gflownet.loss(env, training_objects) @@ -307,13 +307,11 @@ def test_subTB_vs_TB( zero_logF=True, ) - trajectories = gflownet.sample_trajectories( - env, sample_off_policy=False, n_samples=10 - ) + trajectories = gflownet.sample_trajectories(env, save_logprobs=True, n_samples=10) subtb_loss = gflownet.loss(env, trajectories) if weighting == "TB": - tb_loss = TBGFlowNet(pf=pf, pb=pb, off_policy=False).loss( + tb_loss = TBGFlowNet(pf=pf, pb=pb).loss( env, trajectories ) # LogZ is default 0.0. 
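        # With zero_logF=True and weighting="TB", every sub-trajectory term
        # except the full trajectory carries zero weight, so SubTB should
        # reduce to TB up to numerical error.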
assert (tb_loss - subtb_loss).abs() < 1e-4 diff --git a/testing/test_samplers_and_trajectories.py b/testing/test_samplers_and_trajectories.py index 71bdbc04..aa1b61b5 100644 --- a/testing/test_samplers_and_trajectories.py +++ b/testing/test_samplers_and_trajectories.py @@ -1,4 +1,4 @@ -from typing import Literal +from typing import Literal, Tuple import pytest @@ -11,18 +11,18 @@ BoxPFEstimator, BoxPFNeuralNet, ) -from gfn.modules import DiscretePolicyEstimator +from gfn.modules import DiscretePolicyEstimator, GFNModule from gfn.samplers import Sampler from gfn.utils import NeuralNet def trajectory_sampling_with_return( env_name: str, - preprocessor_name: str, + preprocessor_name: Literal["KHot", "OneHot", "Identity", "Enum"], delta: float, n_components_s0: int, n_components: int, -) -> Trajectories: +) -> Tuple[Trajectories, Trajectories, GFNModule, GFNModule]: if env_name == "HyperGrid": env = HyperGrid(ndim=2, height=8, preprocessor_name=preprocessor_name) elif env_name == "DiscreteEBM": @@ -33,10 +33,6 @@ def trajectory_sampling_with_return( if preprocessor_name != "Identity": pytest.skip("Useless tests") env = Box(delta=delta) - else: - raise ValueError("Unknown environment name") - - if env_name == "Box": pf_module = BoxPFNeuralNet( hidden_dim=32, n_hidden_layers=2, @@ -59,6 +55,10 @@ def trajectory_sampling_with_return( env=env, module=pb_module, n_components=n_components ) else: + raise ValueError("Unknown environment name") + + if env_name != "Box": + assert not isinstance(env, Box) pf_module = NeuralNet( input_dim=env.preprocessor.output_dim, output_dim=env.n_actions ) @@ -81,14 +81,17 @@ def trajectory_sampling_with_return( sampler = Sampler(estimator=pf_estimator) # Test mode collects log_probs and estimator_ouputs, not encountered in the wild. trajectories = sampler.sample_trajectories( - env, off_policy=False, n_trajectories=5, debug_mode=True + env, + save_logprobs=True, + n_trajectories=5, + save_estimator_outputs=True, ) # trajectories = sampler.sample_trajectories(env, n_trajectories=10) # TODO - why is this duplicated? 
states = env.reset(batch_shape=5, random=True) bw_sampler = Sampler(estimator=pb_estimator) bw_trajectories = bw_sampler.sample_trajectories( - env, off_policy=False, states=states + env, save_logprobs=True, states=states ) return trajectories, bw_trajectories, pf_estimator, pb_estimator @@ -101,11 +104,11 @@ def trajectory_sampling_with_return( @pytest.mark.parametrize("n_components", [1, 2, 5]) def test_trajectory_sampling( env_name: str, - preprocessor_name: str, + preprocessor_name: Literal["KHot", "OneHot", "Identity", "Enum"], delta: float, n_components_s0: int, n_components: int, -) -> Trajectories: +): if env_name == "HyperGrid": if delta != 0.1 or n_components_s0 != 1 or n_components != 1: pytest.skip("Useless tests") diff --git a/tutorials/examples/train_box.py b/tutorials/examples/train_box.py index 5a3cf8dd..8bf7ec5b 100644 --- a/tutorials/examples/train_box.py +++ b/tutorials/examples/train_box.py @@ -6,6 +6,7 @@ python train_box.py --delta {0.1, 0.25} --tied {--uniform_pb} --loss {TB, DB} """ + from argparse import ArgumentParser import numpy as np @@ -157,14 +158,12 @@ def main(args): # noqa: C901 pf=pf_estimator, pb=pb_estimator, logF=logF_estimator, - off_policy=False, ) else: gflownet = SubTBGFlowNet( pf=pf_estimator, pb=pb_estimator, logF=logF_estimator, - off_policy=False, weighting=args.subTB_weighting, lamda=args.subTB_lambda, ) @@ -172,13 +171,11 @@ def main(args): # noqa: C901 gflownet = TBGFlowNet( pf=pf_estimator, pb=pb_estimator, - off_policy=False, ) elif args.loss == "ZVar": gflownet = LogPartitionVarianceGFlowNet( pf=pf_estimator, pb=pb_estimator, - off_policy=False, ) assert gflownet is not None, f"No gflownet for loss {args.loss}" @@ -189,9 +186,11 @@ def main(args): # noqa: C901 if not args.uniform_pb: optimizer.add_param_group( { - "params": pb_module.last_layer.parameters() - if args.tied - else pb_module.parameters(), + "params": ( + pb_module.last_layer.parameters() + if args.tied + else pb_module.parameters() + ), "lr": args.lr, } ) @@ -232,7 +231,7 @@ def main(args): # noqa: C901 print(f"current optimizer LR: {optimizer.param_groups[0]['lr']}") trajectories = gflownet.sample_trajectories( - env, sample_off_policy=False, n_samples=args.batch_size + env, save_logprobs=True, n_samples=args.batch_size ) training_samples = gflownet.to_training_samples(trajectories) diff --git a/tutorials/examples/train_discreteebm.py b/tutorials/examples/train_discreteebm.py index 3574fa2d..45537686 100644 --- a/tutorials/examples/train_discreteebm.py +++ b/tutorials/examples/train_discreteebm.py @@ -10,6 +10,7 @@ [Learning GFlowNets from partial episodes for improved convergence and stability](https://arxiv.org/abs/2209.12782) python train_hypergrid.py --ndim {2, 4} --height 12 --R0 {1e-3, 1e-4} --tied --loss {TB, DB, SubTB} """ + from argparse import ArgumentParser import torch @@ -70,7 +71,7 @@ def main(args): # noqa: C901 validation_info = {"l1_dist": float("inf")} for iteration in trange(n_iterations): trajectories = gflownet.sample_trajectories( - env, off_policy=False, n_samples=args.batch_size + env, save_logprobs=True, n_samples=args.batch_size ) training_samples = gflownet.to_training_samples(trajectories) diff --git a/tutorials/examples/train_hypergrid.py b/tutorials/examples/train_hypergrid.py index 4d4e3a25..97157880 100644 --- a/tutorials/examples/train_hypergrid.py +++ b/tutorials/examples/train_hypergrid.py @@ -10,6 +10,7 @@ [Learning GFlowNets from partial episodes for improved convergence and stability](https://arxiv.org/abs/2209.12782) python 
train_hypergrid.py --ndim {2, 4} --height 12 --R0 {1e-3, 1e-4} --tied --loss {TB, DB, SubTB} """ + from argparse import ArgumentParser import torch @@ -37,7 +38,6 @@ def main(args): # noqa: C901 seed = args.seed if args.seed != 0 else DEFAULT_SEED set_seed(seed) - off_policy_sampling = False if args.replay_buffer_size == 0 else True device_str = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" use_wandb = len(args.wandb_project) > 0 @@ -122,7 +122,6 @@ def main(args): # noqa: C901 gflownet = ModifiedDBGFlowNet( pf_estimator, pb_estimator, - off_policy_sampling, ) if args.loss in ("DB", "SubTB"): @@ -153,14 +152,12 @@ def main(args): # noqa: C901 pf=pf_estimator, pb=pb_estimator, logF=logF_estimator, - off_policy=off_policy_sampling, ) else: gflownet = SubTBGFlowNet( pf=pf_estimator, pb=pb_estimator, logF=logF_estimator, - off_policy=off_policy_sampling, weighting=args.subTB_weighting, lamda=args.subTB_lambda, ) @@ -168,13 +165,11 @@ def main(args): # noqa: C901 gflownet = TBGFlowNet( pf=pf_estimator, pb=pb_estimator, - off_policy=off_policy_sampling, ) elif args.loss == "ZVar": gflownet = LogPartitionVarianceGFlowNet( pf=pf_estimator, pb=pb_estimator, - off_policy=off_policy_sampling, ) assert gflownet is not None, f"No gflownet for loss {args.loss}" @@ -224,7 +219,10 @@ def main(args): # noqa: C901 validation_info = {"l1_dist": float("inf")} for iteration in trange(n_iterations): trajectories = gflownet.sample_trajectories( - env, n_samples=args.batch_size, sample_off_policy=off_policy_sampling + env, + n_samples=args.batch_size, + save_logprobs=args.replay_buffer_size == 0, + save_estimator_outputs=False, ) training_samples = gflownet.to_training_samples(trajectories) if replay_buffer is not None: @@ -299,7 +297,7 @@ def main(args): # noqa: C901 "--loss", type=str, choices=["FM", "TB", "DB", "SubTB", "ZVar", "ModifiedDB"], - default="TB", + default="FM", help="Loss function to use", ) parser.add_argument( diff --git a/tutorials/examples/train_hypergrid_simple.py b/tutorials/examples/train_hypergrid_simple.py index d21ef349..98c3ecae 100644 --- a/tutorials/examples/train_hypergrid_simple.py +++ b/tutorials/examples/train_hypergrid_simple.py @@ -35,7 +35,7 @@ pb_estimator = DiscretePolicyEstimator( module_PB, env.n_actions, is_backward=True, preprocessor=env.preprocessor ) -gflownet = TBGFlowNet(init_logZ=0.0, pf=pf_estimator, pb=pb_estimator, off_policy=True) +gflownet = TBGFlowNet(init_logZ=0.0, pf=pf_estimator, pb=pb_estimator) # Feed pf to the sampler. sampler = Sampler(estimator=pf_estimator) @@ -56,7 +56,8 @@ trajectories = sampler.sample_trajectories( env, n_trajectories=batch_size, - off_policy=True, + save_logprobs=False, + save_estimator_outputs=True, epsilon=exploration_rate, ) optimizer.zero_grad() diff --git a/tutorials/examples/train_line.py b/tutorials/examples/train_line.py index ccbbf1cf..0fb9db09 100644 --- a/tutorials/examples/train_line.py +++ b/tutorials/examples/train_line.py @@ -228,7 +228,7 @@ def train( trajectories = gflownet.sample_trajectories( env, n_samples=batch_size, - sample_off_policy=True, + save_estimator_outputs=True, scale_factor=scale_schedule[iteration], # Off policy kwargs. 
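            # scale_factor is a policy kwarg: it is forwarded via
            # **policy_kwargs to the estimator's to_probability_distribution,
            # which is what makes this sampling off-policy.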
) training_samples = gflownet.to_training_samples(trajectories) @@ -291,7 +291,7 @@ def train( policy_std_max=policy_std_max, ) pb = StepEstimator(environment, pb_module, backward=True) - gflownet = TBGFlowNet(pf=pf, pb=pb, off_policy=True, init_logZ=0.0) + gflownet = TBGFlowNet(pf=pf, pb=pb, init_logZ=0.0) gflownet = train( gflownet, diff --git a/tutorials/notebooks/intro_gfn_continuous_line.ipynb b/tutorials/notebooks/intro_gfn_continuous_line.ipynb index 232abda1..52b35c8b 100644 --- a/tutorials/notebooks/intro_gfn_continuous_line.ipynb +++ b/tutorials/notebooks/intro_gfn_continuous_line.ipynb @@ -83,22 +83,15 @@ }, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "/tmp/ipykernel_145438/1097605799.py:20: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", - " Normal(torch.tensor(m), torch.tensor(s)) for m, s in zip(mus, self.sigmas)\n" + "ename": "TypeError", + "evalue": "Can't instantiate abstract class Line with abstract methods backward_step, step", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[2], line 154\u001b[0m\n\u001b[1;32m 151\u001b[0m plt\u001b[38;5;241m.\u001b[39mshow()\n\u001b[1;32m 153\u001b[0m \u001b[38;5;66;03m# Set up our simple environment.\u001b[39;00m\n\u001b[0;32m--> 154\u001b[0m env \u001b[38;5;241m=\u001b[39m \u001b[43mLine\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmus\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvariances\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0.2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0.2\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_sd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m4.5\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minit_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_steps_per_trajectory\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m5\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 155\u001b[0m render(env)\n", + "\u001b[0;31mTypeError\u001b[0m: Can't instantiate abstract class Line with abstract methods backward_step, step" ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjcAAAHHCAYAAABDUnkqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/SrBM8AAAACXBIWXMAAA9hAAAPYQGoP6dpAAB01UlEQVR4nO3deVxU9f4/8NcM+w7Kosiq4oIL7oYbam5lpnVdckmzsjK10utNLUutm9hm+i2Xm12Xuu7lUmmmmablvoCggLggioAoArLOMHN+f/g7EygqAzNzzpx5PR+PeQiHmXPeM35gXvP5fM7nqARBEEBERESkEGqpCyAiIiIyJYYbIiIiUhSGGyIiIlIUhhsiIiJSFIYbIiIiUhSGGyIiIlIUhhsiIiJSFIYbIiIiUhSGGyIiIlIUhhsiBUpLS4NKpcLq1aulLsWievbsiZ49e0pdBhFJjOGGyMqsXr0aKpUKJ06ckLqUKs2dOxcqleqBt6ysLKlLtBnFxcWYO3cu9u/fL3UpRBZlL3UBRGR6oaGhKCkpgYODg2Q1LFu2DO7u7vdt9/b2Ntsxd+/ebbZ9W6Pi4mLMmzcPANijRTaF4YZIgVQqFZydnSWtYejQofD19bXoMR0dHR95n9LSUjg6OkKtZsc1kVLxt5tIgaqac/PCCy/A3d0dGRkZGDJkCNzd3eHn54fp06dDp9NVerxer8eiRYvQokULODs7IyAgAK+++ipu375tshr3798PlUqFTZs24aOPPkJQUBCcnZ3x+OOP48KFC4b7TZ48Ge7u7iguLr5vHyNHjkS9evUM9d8750Y8xoYNGzB79mw0aNAArq6uKCgoAABs3rwZ7du3h4uLC3x9fTFmzBhkZGRUOkZ1XzfxNf/ss8+wZMkSNGzYEK6urujXrx+uXr0KQRDw4YcfIigoCC4uLhg8eDByc3Pve06//PILunfvDjc3N3h4eGDgwIE4e/as0TWlpaXBz88PADBv3jzDsODcuXON+F8isk4MN0Q2RKfToX///qhbty4+++wzxMTE4PPPP8fXX39d6X6vvvoq/vWvf6Fr165YvHgxxo8fj7Vr16J///7QarXVOlZubi5u3rxZ6ZaXl3ff/RYsWICtW7di+vTpmDVrFo4cOYLRo0cbfj5ixAgUFRVhx44dlR5XXFyMn376CUOHDoWdnd1Da/nwww+xY8cOTJ8+HfPnz4ejoyNWr16N4cOHw87ODrGxsZgwYQK2bNmCbt263VdndV83AFi7di2WLl2KKVOm4J///Cf++OMPDB8+HLNnz8auXbswY8YMvPLKK/jpp58wffr0So/97rvvMHDgQLi7u+Pjjz/Ge++9h3PnzqFbt25IS0szqiY/Pz8sW7YMAPDMM8/gu+++w3fffYdnn332oa8VkSIIRGRVVq1aJQAQjh8//sD7XL58WQAgrFq1yrBt3LhxAgDhgw8+qHTftm3bCu3btzd8f/DgQQGAsHbt2kr327VrV5Xb7zVnzhwBQJW3pk2bGu63b98+AYDQvHlzoayszLB98eLFAgAhISFBEARB0Ov1QoMGDYR//OMflY6zadMmAYBw4MABw7aYmBghJibmvmM0bNhQKC4uNmzXaDSCv7+/0LJlS6GkpMSw/eeffxYACO+//77Rr5v4mvv5+Ql5eXmG7bNmzRIACFFRUYJWqzVsHzlypODo6CiUlpYKgiAId+7cEby9vYUJEyZUOk5WVpbg5eVVaXt1a8rJyREACHPmzBGIbAl7bohszGuvvVbp++7du+PSpUuG7zdv3gwvLy/07du3Uq9L+/bt4e7ujn379lXrOD/88AP27NlT6bZq1ar77jd+/PhKc2W6d+8OAIaaVCoVhg0bhp07d6KwsNBwv40bN6JBgwbo1q3bI2sZN24cXFxcDN+fOHECN27cwOuvv15pbtLAgQPRrFmz+3qJgEe/bqJhw4bBy8vL8H3nzp0BAGPGjIG9vX2l7RqNxjAMtmfPHuTl5WHkyJGVXnc7Ozt07ty5yte9ujUR2RpOKCayIc7OzoZ5GCIfH59Kc2lSU1ORn58Pf3//Kvdx48aNah2rR48e1ZpQHBIScl89ACrVNGLECCxatAg//vgjRo0ahcLCQuzcuROvvvoqVCrVI48RHh5e6fsrV64AAJo2bXrffZs1a4Y///yz0rbqvG4Pej5i0AkODq5yu7iP1NRUAEDv3r2rfA6enp41ronI1jDcENmQR81NAe5OJvb398fatWur/Pm9b6jmqkkQBMPXjz32GMLCwrBp0yaMGjUKP/30E0pKSjBixIhqHaNir40pazTmvo96nnq9HsDdeTf16tW7734Ve32MrYnI1jDcEFEljRo1wm+//YauXbvWOhSY0vDhw7F48WIUFBRg48aNCAsLw2OPPVajfYWGhgIAUlJS7uspSUlJMfzckho1agQA8Pf3R58+fUyyz+r0ahEpEefcEFElw4cPh06nw4cffnjfz8rLy6s848kSRowYgbKyMqxZswa7du3C8OHDa7yvDh06wN/fH8uXL0dZWZlh+y+//IKkpCQMHDjQFCUbpX///vD09MT8+fOrPCMtJyfH6H26uroCgGT/Z0RSYc8NkZVauXIldu3add/2N998s1b7jYmJwauvvorY2FjExcWhX79+cHBwQGpqKjZv3ozFixdj6NChj9zP999/X+UKxX379kVAQIDRdbVr1w6NGzfGu+++i7KysmoPSVXFwcEBH3/8McaPH4+YmBiMHDkS2dnZWLx4McLCwjB16tQa77umPD09sWzZMjz//PNo164dnnvuOfj5+SE9PR07duxA165d8dVXXxm1TxcXF0RGRmLjxo1o0qQJ6tSpg5YtW6Jly5ZmehZE8sBwQ2SlxDVM7vXCCy/Uet/Lly9H+/bt8Z///AfvvPMO7O3tERYWhjFjxqBr167V2sfEiROr3L5v374ahRvgbu/NRx99hMaNG6Ndu3Y12ofohRdegKurKxYsWIAZM2bAzc0NzzzzDD7++GOzXiLiYUaNGoXAwEAsWLAAn376KcrKytCgQQN0794d48ePr9E+v/nmG0yZMgVTp06FRqPBnDlzGG5I8VRCxVl7RERERFaOc26IiIhIURhuiIiISFEYboiIiEhRJA03Bw4cwKBBgxAYGAiVSoVt27Y98jH79+9Hu3bt4OTkhMaNG1e66jERERGRpOGmqKgIUVFRWLJkSbXuf/nyZQwcOBC9evVCXFwc3nrrLbz88sv49ddfzVwpERERWQvZnC2lUqmwdetWDBky5IH3mTFjBnbs2IHExETDtueeew55eXlVrvdBREREtseq5twcPnz4vmXJ+/fvj8OHDz/wMWVlZSgoKDDc8vPzkZOTA5lkOiIiIjIxqwo3WVlZ9y3+FRAQgIKCApSUlFT5mNjYWHh5eRlu3t7e8Pf3x507dyxRMlGtaDQazJs3D/PmzYNGo5G6HFIItitSOqsKNzUxa9Ys5OfnG25Xr16VuiQiIiIyI6u6/EK9evWQnZ1daVt2djY8PT0fePViJycnODk5WaI8IiIikgGr6rmJjo7G3r17K2
3bs2cPoqOjJaqIiIiI5EbScFNYWIi4uDjExcUBuHuqd1xcHNLT0wHcHVIaO3as4f6vvfYaLl26hLfffhvJyclYunQpNm3aJMkVfImIiEieJB2WOnHiBHr16mX4ftq0aQCAcePGYfXq1cjMzDQEHQAIDw/Hjh07MHXqVCxevBhBQUH45ptv0L9/f4vXTmQJarUaUVFRhq+JTIHtipRONuvcWEpBQQG8vLyQn58PT09PqcshIiIiE2NkJyIiIkWxqrOliGyNIAjQarUAAAcHB6hUKokrIiVguyKlY88NkYxptVrExsYiNjbW8GZEVFtsV6R0DDdERESkKAw3REREpCgMN0RERKQoDDdERESkKAw3REREpCgMN0RERKQoXOeGSMbUajUiIyMNXxOZAtsVKR0vv0BERESKwshOREREisJwQ0RERIrCOTdEMqbRaBAbGwsAmDVrFhwdHSWuiJSA7YqUjj03REREpCgMN0RERKQoDDdERESkKAw3REREpCgMN0RERKQoDDdERESkKDwVnEjG1Go1IiIiDF8TmQLbFSkdL79AREREisLITkTVtn//fqhUKuTl5UldCikI2xWZGsMNkUK88MILUKlUeO211+772aRJk6BSqfDCCy9YvrB7ZGZmYtSoUWjSpAnUajXeeustqUuih7CWdrVlyxb07dsXfn5+8PT0RHR0NH799VepyyKJMNwQyZhGo8H8+fMxf/58aDSaR94/ODgYGzZsQElJiWFbaWkp1q1bh5CQEHOWWm1lZWXw8/PD7NmzERUVJXU5NkmJ7erAgQPo27cvdu7ciZMnT6JXr14YNGgQTp8+LXVpJAGGGyKZ02q10Gq11bpvu3btEBwcjC1bthi2bdmyBSEhIWjbtm2l+5aVleGNN96Av78/nJ2d0a1bNxw/frzSfXbu3IkmTZrAxcUFvXr1Qlpa2n3H/PPPP9G9e3e4uLggODgYb7zxBoqKih5YY1hYGBYvXoyxY8fCy8urWs+LTE9p7WrRokV4++230bFjR0RERGD+/PmIiIjATz/9VK3nSMrCcEOkMC+++CJWrVpl+H7lypUYP378ffd7++238cMPP2DNmjU4deoUGjdujP79+yM3NxcAcPXqVTz77LMYNGgQ4uLi8PLLL2PmzJmV9nHx4kUMGDAA//jHP3DmzBls3LgRf/75JyZPnmzeJ0kWZ23tSq/X486dO6hTp04NnzFZNcHG5OfnCwCE/Px8qUsheqSysjJh7ty5wty5c4WysrKH3nfcuHHC4MGDhRs3bghOTk5CWlqakJaWJjg7Ows5OTnC4MGDhXHjxgmCIAiFhYWCg4ODsHbtWsPjNRqNEBgYKHzyySeCIAjCrFmzhMjIyErHmDFjhgBAuH37tiAIgvDSSy8Jr7zySqX7HDx4UFCr1UJJSckjn19MTIzw5ptvPvJ+ZFpKb1eCIAgff/yx4OPjI2RnZ1fr/qQsXOeGSMZ0OuDy5VAUFnrgjz9U6N0bsLN7+GP8/PwwcOBArF69GoIgYODAgfD19a10n4sXL0Kr1aJr166GbQ4ODujUqROSkpIAAElJSejcuXOlx0VHR1f6Pj4+HmfOnMHatWsN2wRBgF6vx+XLl9G8efOaPG0yM6W3q3Xr1mHevHnYvn07/P39H/7ESJEYbohkassW4I03HJCR8QIA4IcfgKAgYPFi4NlnH/7YF1980dCFv2TJErPVWFhYiFdffRVvvPHGfT+Ty0RTqkzp7WrDhg14+eWXsXnzZvTp08dcJZLMcc4NkQxt2QIMHQpkZFTenpFxd3uFeZ1VGjBgADQaDbRaLfr373/fzxs1agRHR0f89ddfhm1arRbHjx9HZGQkAKB58+Y4duxYpccdOXKk0vft2rXDuXPn0Lhx4/tujo6ORjxjsgSlt6v169dj/PjxWL9+PQYOHPjwJ0OKxnBDJDM6HfDmm8DdtcNVlX4mrif+1lt37/cgdnZ2SEpKwrlz52BXxXiDm5sbJk6ciH/961/YtWsXzp07hwkTJqC4uBgvvfQSAOC1115Damoq/vWvfyElJQXr1q3D6tWrK+1nxowZOHToECZPnoy4uDikpqZi+/btj5z4GRcXh7i4OBQWFiInJwdxcXE4d+7cI14Zqg2lt6t169Zh7Nix+Pzzz9G5c2dkZWUhKysL+fn51Xh1SHGknfJjeZxQTHK3b58g3H27efht377KjxMnfj5IxYmfgiAIJSUlwpQpUwRfX1/ByclJ6Nq1q3Ds2LFKj/npp5+Exo0bC05OTkL37t2FlStXVpr4KQiCcOzYMaFv376Cu7u74ObmJrRu3Vr46KOPHvocAdx3Cw0NrdbrQzWj9HYVExNTZbuqWBvZDl5bikhm1q8HRo169P3WrQNGjjR/PaQMbFdkSzgsRSQzfn7l1bqfvX2OmSshJUlO3let+9Wvb+ZCiCyA4YZIRm7evIk5c3oDuApA/4B76QGk45NPBsHGOl6phg4ePIgPPuiDR7erq0hMXGa5wojMhOGGSEbeeecdHDp0EC4uswCooFJVDi8qFaBSqeDkNBMnThzFDz/8IE2hZDUEQcCMGTMA6NGt2/dQqapqVwLuTjJ+E1OmvI6zZ89KUSqRyTDcEMlEenq64ayRX399FT/8oEJgYOX7BAUB33+vwowZEQCAd999F+Xl1RvGItv0008/4fDhw3BxccGmTc/h+++ralcqfP89MHjw3V6d+fPnS1ApkelwQjGRTEyePBlLlixBr1698PvvvwMASko0mDhxHQoLPfDqq0+jd28H2NndbceNGjXCzZs38fXXX2PChAkSV09ypNPpEBUVhbNnz2LWrFmG0PKgdnXq1Cm0b98earUaycnJiIiIkPgZENUMe26IZCAzMxPffPMNAOC9994zbLezA8LDr6BVq0TExAiGJfI9PT3x7rvvAgDmzp2LkpISi9dM8ve///0PZ8+ehY+PD95++23D9ge1q3bt2mHgwIHQ6/XsvSGrxnBDJAOfffYZysrK0LVrV/Ts2bNaj3nttdcQEhKC69ev48svvzRvgWR1SktL8f777wMAZs2aBW9v72o9TgzX3333HdLS0sxUHZF5MdwQSSwnJwfLly8HcPeNRaVSPeIRdzk7O+ODDz4AAMTGxuL27dtmq5Gsz7Jly5Ceno4GDRo8csXoijp37oy+fftCp9NhwYIFZqyQyHwYbogktnDhQhQXF6NDhw7o169fpZ+pVCoEBgYiMDCwytAzZswYtGjRAnl5efjkk08sVTLJXEFBAT766CMAd4ctXVxcKv38Ue1q9uzZAIBVq1bh2rVr5i+YyMQ4oZhIQrm5uQgNDUVhYSG2b9+Op59+2uh9/Pjjjxg8eDBcXFxw8eJF1OcqbDZv7ty5mDdvHpo1a4aEhATY29sbvY+YmBgcOHAAU6ZMwf/93/+ZoUoi82HPDZGEFi9ejMLCQkRFRWHQoEE12segQYPQsWNHlJSUYMOGDSaukKyNXq/Hf//7XwB3Q05Ngg3w99ybFStWICsry2T1EVkCww2RRMrKygyfiGfPnl3tuTb3UqlUGDNmDABwUT/C8
ePHce3aNbi7u2Pw4ME13s/jjz+Oxx57DKWlpfjqq69MWCGR+THcEEnk999/R15eHho0aIBnn322yvtotVosWrQIixYtglarfeC+xMcfOnQI169fN0u9ZB3EgDtw4EA4OztXeZ/qtCuVSoU33nij0j6JrAXDDZFEtm3bBgB4+umnoVZX/asoCALy8/ORn5//0OtIBQUF4bHHHoMgCNi6das5yiUrIAgCvv/+ewDA0KFDH3q/6rSrJ598Eg4ODkhOTkZKSorJ6yUyF4YbIgno9Xr8+OOPAFCroYOK/vGPfwDgp2xbFhcXh8uXL8PFxQVPPPFErffn5eVlWHdp+/bttd4fkaUw3BBJ4Pjx48jKyoKnpyd69eplkn2K4eaPP/5ATk6OSfZJ1kUMtgMGDICbm5tJ9jlkyBAADDdkXRhuiCQgvlE88cQTcHR0NMk+w8PD0a5dO+j1esOQF9mO6g5JGUtcnuDw4cPIzs422X6JzInhhkgCYvgw1ZCUiENTtuvcuXNISUmBo6MjnnrqKZPtNygoCO3bt4cgCPj5559Ntl8ic2K4IbKw1NRUJCUlwd7e3iTzIioSP7Hv3buXl2OwMWKg7du3r8kXKBVDOIemyFow3BBZmPgG0bNnz0dezFClUsHPzw9+fn7VWgenSZMmaNmyJcrLyw0Tlsk2iENSYu/dwxjbrsRws2fPHhQVFdWuUCILYLghsjAx3FRnSMrBwQGvv/46Xn/9dTg4OFRr/2LvDYembEdqaqrhMgvmaFetWrVCWFgYSktLsXv3blOUTGRWDDdEFnTjxg389ddfAEw/30YkfnLfvXs3CgoKzHIMkhcxyPbq1Qt16tQx+f5VKhXPmiKrwnBDZEE///wzBEFAu3btEBwcbJZjtGjRAk2aNEFZWRl27NhhlmOQvBgzJFVTYhj/+eefUV5ebrbjEJkCww2RBRkzJAXcXSZ/6dKlWLp06UMvv1CRSqXi0JQNuXLlCk6ePAm1Wm3oXXmUmrSrbt26oU6dOrh16xYOHTpUi4qJzI/hhshCiouLsWfPHgDVDzeCICAnJwc5OTkPXSb/XuKb3G+//QadTmd0rWQ9xDkw0dHRCAgIqNZjatKu7O3tMXDgQAAcmiL5Y7ghspDdu3ejpKQEoaGhaN26tVmP1bZtW3h4eCA/Px8JCQlmPRZJ68CBAwBgspWuH0YM5du2bTMqbBNZGsMNkYX88ssvAO6+QVTn9NvasLe3R7du3QDcvRwDKZMgCIb/35iYGLMfr3///nBycsKlS5dw/vx5sx+PqKYYbogs5ODBgwCAxx9/3CLH69GjB4C/P9mT8ly5cgVXr16Fvb09oqOjzX48d3d3dO7cGQDw559/mv14RDXFcENkATdv3kRSUhIAoGvXrhY5ZsVwwyEEZRKDa4cOHUx2ocxHEXsExbBOJEeSh5slS5YgLCwMzs7O6Ny5M44dO/bQ+y9atAhNmzaFi4sLgoODMXXqVJSWllqoWqKaEde2iYyMRN26dS1yzA4dOsDFxaVSsCJlEYekxCBrCd27dwfAnhuSN0nDzcaNGzFt2jTMmTMHp06dQlRUFPr3748bN25Uef9169Zh5syZmDNnDpKSkvDf//4XGzduxDvvvGPhyomMI74RiJ96q0ulUsHLywteXl5Gz9NxdHQ0DFVwaEqZxP9XY+fb1KZdRUdHQ6VS4eLFi8jMzDTqsUQWI0ioU6dOwqRJkwzf63Q6ITAwUIiNja3y/pMmTRJ69+5dadu0adOErl27VvuY+fn5AgAhPz+/ZkUT1UDnzp0FAMK3335r0ePOmzdPACA899xzFj0umV9GRoYAQFCpVEJeXp5Fjx0VFSUAEDZt2mTR4xJVl2Q9NxqNBidPnkSfPn0M29RqNfr06YPDhw9X+ZguXbrg5MmThqGrS5cuYefOnXjyyScfeJyysjIUFBRUuhFZUnFxMU6ePAng7y59S+G8G+USe23atGkDLy8vix6bQ1Mkd5KFm5s3b0Kn09236FRAQACysrKqfMyoUaPwwQcfoFu3bnBwcECjRo3Qs2fPhw5LxcbGGrpfvby8zLbkPdGDHDt2DOXl5WjQoAFCQ0MteuzOnTvD0dER169fx8WLFy16bDKvmg5JmYI4vMpwQ3Il+YRiY+zfvx/z58/H0qVLcerUKWzZsgU7duzAhx9++MDHzJo1C/n5+Ybb1atXLVgx0d9nlXTr1s3o+Q1arRYrVqzAihUrqr1MfkUuLi7o1KkTAM67URrx/7Mmk4lr267EcBMXF8fecJIlycKNr68v7OzskJ2dXWl7dnY26tWrV+Vj3nvvPTz//PN4+eWX0apVKzzzzDOYP38+YmNjodfrq3yMk5MTPD09K92ILEn8dFuTISlBEHD9+nVcv369xsNK4psfF/NTjps3b+Ls2bMApGlXDRo0QHh4OPR6PY4cOWL044nMTbJw4+joiPbt22Pv3r2GbXq9Hnv37n3gYlTFxcVQqyuXbGdnBwCcT0CyVF5ebrjIoLFnSpkKF/NTHrE3sEWLFvD19ZWkBg5NkZxJOiw1bdo0rFixAmvWrEFSUhImTpyIoqIijB8/HgAwduxYzJo1y3D/QYMGYdmyZdiwYQMuX76MPXv24L333sOgQYMMIYdITs6cOYPCwkJ4enqiZcuWktTQpUsX2NnZIS0tDenp6ZLUQKZlyUsuPAgX8yM5s5fy4CNGjEBOTg7ef/99ZGVloU2bNti1a5dhknF6enqlnprZs2dDpVJh9uzZyMjIgJ+fHwYNGoSPPvpIqqdA9FDip9quXbtKFsA9PDzQrl07HD9+HAcOHMCYMWMkqYNMpzbzbUxFHA47evQoNBoNHB0dJauF6F6STyiePHkyrly5grKyMhw9etRw3RLg7gTi1atXG763t7fHnDlzcOHCBZSUlCA9PR1LliyBt7e35QsnqoaaLt5nauInfA5NWb/8/HzExcUBkDbcNGvWDHXr1kVJSQlOnz4tWR1EVZE83BAplSAIlc6UkhInFSvHn3/+CUEQEBERgfr160tWh0qlMlwnjUNTJDcMN0RmcunSJWRlZcHR0dFwOnZNuLq6wtXVtVa1iKehnz9//oHrSJF1MNWQlCnaFRfzI7mSdM4NkZKJf/A7dOgAZ2fnGu3D0dER//rXv2pdi4+PD1q3bo34+HgcPHgQw4YNq/U+SRqmWLzPVO2q4hlTgiAYvY4Tkbmw54bITMSuektfcuFBunTpAgA4fvy4xJVQTWm1WsP8lgctmWFJ7dq1g4uLC27duoXk5GSpyyEyYLghMhO5TCYWdejQAQBw4sQJiSuhmjp79izKysrg7e2NRo0aSV0OHB0dDSeBcGiK5IThhsgMcnJykJKSAuDvHpOa0Gq1WL16NVavXl2jZfIrEsPNyZMnH7iiN8mb2OvWoUOHWg0BmbJdcTE/kiPOuSEyA7F3pGnTpqhTp06N9yMIAq5cuWL4ujYiIyPh7OyMgoICXLhwAU2aNKnV/sjyxHYlBtWaMmW7euyxxwBwuJPkhT03RGZw6tQpAED79u0lruRv9vb2aNu2LQAOTVkrU4UbUxLb
eHJyMoqKiiSuhuguhhsiMzh58iQAeYUbgPNurFlpaSkSEhIAyCvc1KtXD4GBgRAEwbC4IJHUGG6IzIDhhkwtISEBWq0Wvr6+CAkJkbqcSsR2LrZ7Iqkx3BCZ2M2bNw0XqBSHgeRCDDenTp2CTqeTuBoyRsUhKbmtJ8NwQ3LDcENkYuIf+IiICHh6ekpcTWVNmzaFm5sbioqKDGdzkXWQ43wbUbt27QAw3JB8MNwQmZiph6QcHBzg4OBgkn3Z2dkZ3og4NGVdTB1uTNmuxLaelJTEScUkCzwVnMjETBluHB0d8c4779R6PxV16NABBw8exIkTJzB27FiT7pvMo7i4GGfPngVgmnBj6nYVGBiIevXqISsrC/Hx8bVa24nIFNhzQ2RicjwNvCJOKrY+8fHx0Ol0hjOT5Ehs72L7J5ISww2RCd26dQtpaWkA/p6HIDdiuDl9+jTKy8slroaqQ86TiUWcVExywnBDZELip9bGjRvDy8ur1vsrLy/HunXrsG7dOpMFkcaNG8PT0xOlpaU4d+6cSfZJ5mXq+TbmaFcMNyQnDDdEJiT+YTdVr41er0dqaipSU1NNdj0otVpteCPi0JR1MHW4MUe7Etv8uXPnUFJSYpJ9EtUUww2RCcl18b57cd6N9SgsLERSUhIAeberBg0awN/fHzqdDvHx8VKXQzaO4YbIhBhuyNROnz4NQRAQFBSEevXqSV3OA6lUKg5NkWww3BCZyO3bt3H58mUA8p1MLBLDTXx8PDQajcTV0MPIefG+e/GMKZILhhsiExH/oDds2BA+Pj4SV/Nw4eHh8PHxgUajQWJiotTl0ENYY7hhzw1JjeGGyESsZUgKuDuEwKEp62CN4ebs2bMoLS2VuBqyZQw3RCZi6jOlzI3hRv7y8/Nx/vx5ANYRmoOCguDr64vy8nKcOXNG6nLIhvHyC0QmYo6eG0dHR8yZM8dk+6uI4Ub+xKHOsLAw+Pr6mmy/5mpX4qTiX3/9FSdPnkSnTp1Mfgyi6mDPDZEJ5OXl4eLFiwCsp+dGDGEJCQmcVCxTYrixljYFcN4NyQPDDZEJnD59GsDdT9h169aVuJrqCQkJgZeXF8rLyw3rqJC8iOvFtGnTRtpCjMAzpkgOGG6ITMBck4nLy8uxefNmbN682eTXgVKpVGjdujUAcH6ETIn/L+L/k6mYs12JvwOJiYkoKysz6b6JqovhhsgEzHUlcL1ej3PnzuHcuXMmWya/oqioKADgirIypNFoDNf+Ev+fTMWc7SokJAR169aFVqtFQkKCSfdNVF0MN0QmYI3DBwDDjZwlJydDq9XC09MToaGhUpdTbSqVyvB7wB5BkgrDDVEtlZaWIiUlBYDphw/MjcNS8lVxSEqlUklcjXHYrkhqDDdEtZSUlASdToc6deogMDBQ6nKM0rJlS6hUKty4cQNZWVlSl0MViL1pph6SsgSGG5Iaww1RLYnzCqzxE7arqysiIiIA8I1Ibsw1mdgSKoYbQRAkroZsEcMNUS2Jb0KtWrWSuJKa4bwbebLmnpvmzZtDrVbj1q1b7BEkSTDcENWSNX/CBhhu5Cg7OxvZ2dlQqVRo2bKl1OUYzcXFBU2aNAHAHkGSBi+/QFRL5gw3Dg4OmDVrluFrc+D8CPkR/y8aN24MNzc3k+/fUu0qOTkZZ86cQf/+/c1yDKIHYc8NUS3cuHHD8Am7RYsWJt+/SqWCo6MjHB0dzTafR+y5SUpK4qJrMiH2opmrN9AS7YqhmaTEcENUC+Jk4kaNGpnlE7YlBAcHw9vbG+Xl5UhOTpa6HMLfgcAa59uIxDloDDckBYYbolow93yb8vJybNu2Ddu2bTP5Mvmiipdh4LwbeTD3ZGJLtCuxTSUlJUGr1ZrlGEQPwnBDVAvmDjd6vR7x8fGIj483y+UXROKbKD9lS0+j0RguZGrN7So0NBQeHh7QarWGRS6JLIXhhqgWrP1MKRF7buTDWi+7cC9emJWkxHBDVEPl5eWGCxtae7ipeDo4F12TVsXJxNa2KOS9GG5IKgw3RDV04cIFlJaWws3NDeHh4VKXUystWrSAWq1GTk4OsrOzpS7HpilhMrGIk4pJKgw3RDUk/sFu2bIl1Grr/lWqeBkGDk1Jy9yngVuS+BzEswqJLMW6/yITSUgp821EnFQsD0rquRFXV7527Rpyc3MlroZsCcMNUQ1VvGCmEnBSsfSs/bIL9/Ly8kJYWBgA9t6QZfHyC0Q1ZIkLZjo4OGD69OmGr82J15iSnvjam+uyCyJLtqtWrVohLS0NZ86cQUxMjFmPRSRizw1RDeTn5yMtLQ2AecONSqWCm5sb3NzczH7mjBhukpOTeRkGiVhqSMqS7YpnTJEUGG6IaiAxMREAEBQUhDp16khcjWkEBQUZLsMgLiJHlqWkycQiTiomKTDcENWApebblJeXY8eOHdixY4fZlskXqVQqTiqWmKV6bizZriqGG3Ousk1UEcMNUQ1Y6kwpvV6PEydO4MSJExZ5Y+CkYulotVqzX3ZBZMl21bhxYzg7O6O4uBiXLl0y67GIRAw3RDVgicnEUhCfz9mzZyWuxPacP38eWq0W7u7uVn3ZhXvZ29sjMjISAHsEyXIYboiMJAiC4k4DF4mnH4tzishyxNe8ZcuWVn/ZhXtxUjFZGsMNkZHS09NRUFAABwcHNG3aVOpyTKpFixYAgIyMDNy+fVviamxLxXCjNJxUTJbGcENkJPEPdPPmzc2+RoileXp6IiQkBACHpixNyeGG15giS2O4ITKSkt+EAA5NSUXJ7Up8ThcvXkRJSYnE1ZAtYLghMpLYoyEO4SgNw43lFRcX4+LFiwCUGW4CAgJQp04dCIKA5ORkqcshG8DLLxAZyZKfsB0cHPDmm28avrYEhhvLS0pKgiAI8PX1hb+/v9mPZ+l2JV4r68CBA0hMTETbtm3NfkyybTXqubl48SJmz56NkSNH4saNGwCAX375hWP0pHg6nc6wFoklem5UKhW8vb3h7e1tsTNoKoYbQRAsckxbZ+kzpaRoV+LvC98nyBKMDjd//PEHWrVqhaNHj2LLli0oLCwEcHfRrzlz5pi8QCI5uXjxIsrKyuDi4oLw8HCpyzGLZs2aQa1W49atW8jOzpa6HJug5Pk2IvYIkiUZHW5mzpyJf//739izZw8cHR0N23v37o0jR46YtDgiuRE/dUZGRkKtNv+UNZ1Oh927d2P37t3Q6XRmPx4AuLi4oHHjxgD4RmQplg43UrQr9tyQJRn91zkhIQHPPPPMfdv9/f1x8+ZNkxRFJFfim5ClJhPrdDocPnwYhw8fttibEMBP2ZYmRbixdLsSf2fS0tIMPf5E5mJ0uPH29kZmZuZ920+fPo0GDRqYpCgiuRI/dSp5+ABguLGkvLw8XLt2DYByz8ADAF9fXwQEBAAAzp07J3E1pHRGh5vnnnsOM2bMQFZWFlQqFfR6Pf766y9Mnz4dY8eONbqAJUuWICwsDM7OzujcuTOOHTv20Pvn5eVh0qRJqF+/PpycnNCkSRPs3LnT6OMS1YSle26kwnBjOWJgDgoKgre3t7TFmBmHpsh
-    "image/png": "[... base64-encoded PNG data for the cell's output figure elided ...]",
-    "text/plain": [
-     "<Figure size ...>"
     ]
    },
    "metadata": {},
    "output_type": "display_data"
   }
  ],
  "source": [
@@ -547,6 +540,7 @@
     "        env,\n",
     "        n_samples=batch_size,\n",
     "        scale_factor=scale_schedule[iteration],\n",
+    "        save_estimator_outputs=True,\n",
     "    )\n",
     "    training_samples = gflownet.to_training_samples(trajectories)\n",
     "\n",
@@ -612,7 +606,6 @@
     "gflownet = TBGFlowNet(\n",
     "    pf=pf_estimator,\n",
     "    pb=pb_estimator,\n",
-    "    off_policy=True,\n",
     "    init_logZ=0.0,\n",
     ")\n",
     "\n",
@@ -766,7 +759,6 @@
     "gflownet = TBGFlowNet(\n",
     "    pf=pf_estimator,\n",
     "    pb=pb_estimator,\n",
-    "    off_policy=True,  # No replay buffer.\n",
     "    init_logZ=0.0,\n",
     ")\n",
     "\n",
@@ -857,7 +849,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.12"
+  "version": "3.10.11"
  }
 },
 "nbformat": 4,
diff --git a/tutorials/notebooks/intro_gfn_smiley.ipynb b/tutorials/notebooks/intro_gfn_smiley.ipynb
index 7552ac9a..e8f95d41 100644
--- a/tutorials/notebooks/intro_gfn_smiley.ipynb
+++ b/tutorials/notebooks/intro_gfn_smiley.ipynb
@@ -1987,7 +1987,6 @@
     "gflownet = TBGFlowNet(\n",
     "    pf=pf_estimator,\n",
     "    pb=pb_estimator,\n",
-    "    off_policy=False,  # No replay buffer.\n",
     ")\n",
     "\n",
     "# Policy parameters receive one LR, and LogZ gets a dedicated, typically higher LR.\n",

From 70ae3e1608ad8f079761d42c22af736a7798b5f6 Mon Sep 17 00:00:00 2001
From: Salem
Date: Thu, 21 Mar 2024 20:35:01 +0400
Subject: [PATCH 04/13] revert back to default TB in script

---
 tutorials/examples/train_hypergrid.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tutorials/examples/train_hypergrid.py b/tutorials/examples/train_hypergrid.py
index 97157880..e75357fc 100644
--- a/tutorials/examples/train_hypergrid.py
+++ b/tutorials/examples/train_hypergrid.py
@@ -297,7 +297,7 @@ def main(args):  # noqa: C901
         "--loss",
         type=str,
         choices=["FM", "TB", "DB", "SubTB", "ZVar", "ModifiedDB"],
-        default="FM",
+        default="TB",
         help="Loss function to use",
     )
     parser.add_argument(

From 87c29b55d46422d7a4486bf91fe82adbfd554f11 Mon Sep 17 00:00:00 2001
From: Salem
Date: Thu, 21 Mar 2024 20:37:04 +0400
Subject: [PATCH 05/13] pre-commit stuff

---
 src/gfn/containers/replay_buffer.py | 3 ++-
 src/gfn/env.py                      | 2 +-
 src/gfn/states.py                   | 2 +-
 testing/test_environments.py        | 4 +++-
 4 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/gfn/containers/replay_buffer.py b/src/gfn/containers/replay_buffer.py
index 880314ea..c1679d9a 100644
--- a/src/gfn/containers/replay_buffer.py
+++ b/src/gfn/containers/replay_buffer.py
@@ -1,9 +1,10 @@
 from __future__ import annotations

 import os
-import torch
 from typing import TYPE_CHECKING, Literal

+import torch
+
 from gfn.containers.trajectories import Trajectories
 from gfn.containers.transitions import Transitions

diff --git a/src/gfn/env.py b/src/gfn/env.py
index 9b045ca3..d52306f6 100644
--- a/src/gfn/env.py
+++ b/src/gfn/env.py
@@ -389,7 +389,7 @@ class DiscreteEnvStates(DiscreteStates):

     def make_actions_class(self) -> type[Actions]:
         env = self
-        n_actions = self.n_actions
+        self.n_actions

         class DiscreteEnvActions(Actions):
             action_shape = env.action_shape

diff --git a/src/gfn/states.py b/src/gfn/states.py
index 38323abe..d7027873 100644
--- a/src/gfn/states.py
+++ b/src/gfn/states.py
@@ -1,6 +1,6 @@
 from __future__ import annotations  # This allows to use the class name in type hints

-from abc import ABC, abstractmethod
+from abc import ABC
 from copy import deepcopy
 from math import prod
 from typing import Callable, ClassVar, Optional, Sequence, cast

diff --git a/testing/test_environments.py
b/testing/test_environments.py index b110baac..5dbd4cc6 100644 --- a/testing/test_environments.py +++ b/testing/test_environments.py @@ -209,7 +209,9 @@ def test_box_fwd_step(delta: float): ] for failing_actions_list in failing_actions_lists_at_s0: - actions = env.actions_from_tensor(format_tensor(failing_actions_list, discrete=False)) + actions = env.actions_from_tensor( + format_tensor(failing_actions_list, discrete=False) + ) with pytest.raises(NonValidActionsError): states = env._step(states, actions) From feacb14bc46dec09946393e7f336e7f888de014a Mon Sep 17 00:00:00 2001 From: Joseph Viviano Date: Wed, 27 Mar 2024 14:22:12 -0400 Subject: [PATCH 06/13] added has_log_probs function --- src/gfn/containers/trajectories.py | 6 +++++- src/gfn/containers/transitions.py | 10 +++++----- src/gfn/env.py | 2 +- src/gfn/gflownet/base.py | 7 ++----- src/gfn/gflownet/detailed_balance.py | 15 +++++---------- src/gfn/utils/common.py | 8 ++++++++ 6 files changed, 26 insertions(+), 22 deletions(-) diff --git a/src/gfn/containers/trajectories.py b/src/gfn/containers/trajectories.py index 3e72bb3d..4b2645ba 100644 --- a/src/gfn/containers/trajectories.py +++ b/src/gfn/containers/trajectories.py @@ -14,6 +14,7 @@ from gfn.containers.base import Container from gfn.containers.transitions import Transitions +from gfn.utils.common import has_log_probs def is_tensor(t) -> bool: @@ -325,11 +326,14 @@ def to_transitions(self) -> Transitions: ], dim=0, ) + + # Only return logprobs if they exist. log_probs = ( self.log_probs[~self.actions.is_dummy] - if self.log_probs is not None and self.log_probs.nelement() > 0 + if has_log_probs(self) else None ) + return Transitions( env=self.env, states=states, diff --git a/src/gfn/containers/transitions.py b/src/gfn/containers/transitions.py index ef59d2ab..a3c920af 100644 --- a/src/gfn/containers/transitions.py +++ b/src/gfn/containers/transitions.py @@ -11,6 +11,7 @@ from gfn.states import States from gfn.containers.base import Container +from gfn.utils.common import has_log_probs class Transitions(Container): @@ -186,11 +187,10 @@ def __getitem__(self, index: int | Sequence[int]) -> Transitions: log_rewards = ( self._log_rewards[index] if self._log_rewards is not None else None ) - log_probs = ( - self.log_probs[index] - if self.log_probs is not None and self.log_probs.nelement() > 0 - else None - ) + + # Only return logprobs if they exist. 
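+        # ("Exist" means the tensor is present and non-empty, as checked by
+        # has_log_probs; otherwise None is propagated so that downstream
+        # losses recompute the log-probs they need.)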
+ log_probs = self.log_probs[index] if has_log_probs(self) else None + return Transitions( env=self.env, states=states, diff --git a/src/gfn/env.py b/src/gfn/env.py index c4d8dbc5..510d3820 100644 --- a/src/gfn/env.py +++ b/src/gfn/env.py @@ -393,7 +393,7 @@ class DiscreteEnvStates(DiscreteStates): def make_actions_class(self) -> type[Actions]: env = self - self.n_actions + n_actions = self.n_actions class DiscreteEnvActions(Actions): action_shape = env.action_shape diff --git a/src/gfn/gflownet/base.py b/src/gfn/gflownet/base.py index c3d4cdad..0624348a 100644 --- a/src/gfn/gflownet/base.py +++ b/src/gfn/gflownet/base.py @@ -13,6 +13,7 @@ from gfn.modules import GFNModule from gfn.samplers import Sampler from gfn.states import States +from gfn.utils.common import has_log_probs TrainingSampleType = TypeVar( "TrainingSampleType", bound=Union[Container, tuple[States, ...]] @@ -164,11 +165,7 @@ def get_pfs_and_pbs( if valid_states.batch_shape != tuple(valid_actions.batch_shape): raise AssertionError("Something wrong happening with log_pf evaluations") - if ( - trajectories.log_probs is not None - and trajectories.log_probs.nelement() > 0 - and not recalculate_all - ): + if has_log_probs(trajectories) and not recalculate_all: log_pf_trajectories = trajectories.log_probs else: if trajectories.estimator_outputs is not None and not recalculate_all: diff --git a/src/gfn/gflownet/detailed_balance.py b/src/gfn/gflownet/detailed_balance.py index cba6bfc8..7e3ae86a 100644 --- a/src/gfn/gflownet/detailed_balance.py +++ b/src/gfn/gflownet/detailed_balance.py @@ -8,6 +8,7 @@ from gfn.env import Env from gfn.gflownet.base import PFBasedGFlowNet from gfn.modules import GFNModule, ScalarEstimator +from gfn.utils.common import has_log_probs class DBGFlowNet(PFBasedGFlowNet[Transitions]): @@ -72,11 +73,8 @@ def get_scores( if states.batch_shape != tuple(actions.batch_shape): raise ValueError("Something wrong happening with log_pf evaluations") - if ( - transitions.log_probs is not None - and transitions.log_probs.nelement() > 0 - and not recalculate_all - ): + + if has_log_probs(transitions) and not recalculate_all: valid_log_pf_actions = transitions.log_probs else: # Evaluate the log PF of the actions @@ -179,11 +177,8 @@ def get_scores( all_log_rewards = transitions.all_log_rewards[mask] module_output = self.pf(states) pf_dist = self.pf.to_probability_distribution(states, module_output) - if ( - transitions.log_probs is not None - and transitions.log_probs.nelement() > 0 - and not recalculate_all - ): + + if has_log_probs(transitions) and not recalculate_all: valid_log_pf_actions = transitions[mask].log_probs else: # Evaluate the log PF of the actions sampled off policy. 
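
Note: the hunks above lean on has_log_probs, which is added to
src/gfn/utils/common.py in the next file diff (and corrected from isinstance
to hasattr in PATCH 07/13). A minimal sketch of its contract, assuming the
corrected version; the _Fake class and the tensors below are illustrative
only, not part of the patches:

    import torch

    from gfn.utils.common import has_log_probs

    class _Fake:
        """Mimics a container whose log-probs may or may not be saved."""
        log_probs = None

    obj = _Fake()
    print(has_log_probs(obj))  # False: the attribute is None.
    obj.log_probs = torch.tensor([])
    print(has_log_probs(obj))  # False: present but empty (nelement() == 0).
    obj.log_probs = torch.tensor([-1.2, -0.7])
    print(has_log_probs(obj))  # True: populated, so losses can reuse it.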
diff --git a/src/gfn/utils/common.py b/src/gfn/utils/common.py index cc5b97a7..839fc7ce 100644 --- a/src/gfn/utils/common.py +++ b/src/gfn/utils/common.py @@ -15,3 +15,11 @@ def set_seed(seed: int, performance_mode: bool = False) -> None: if not performance_mode: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False + + +def has_log_probs(obj): + """Returns True if the submitted object has the log_probs attribute populated.""" + if not isinstance(obj, "log_probs"): + return False + + return obj.log_probs is not None and obj.log_probs.nelement() > 0 From cc67a59fe66a3cb373b3fa2ffb0f0a6554d569bf Mon Sep 17 00:00:00 2001 From: Joseph Viviano Date: Wed, 27 Mar 2024 14:57:44 -0400 Subject: [PATCH 07/13] hasattr not isinstance --- src/gfn/utils/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gfn/utils/common.py b/src/gfn/utils/common.py index 839fc7ce..6094a179 100644 --- a/src/gfn/utils/common.py +++ b/src/gfn/utils/common.py @@ -19,7 +19,7 @@ def set_seed(seed: int, performance_mode: bool = False) -> None: def has_log_probs(obj): """Returns True if the submitted object has the log_probs attribute populated.""" - if not isinstance(obj, "log_probs"): + if not hasattr(obj, "log_probs"): return False return obj.log_probs is not None and obj.log_probs.nelement() > 0 From a50af8eddfb1d4ba0e58a44a940731f22d4c03d4 Mon Sep 17 00:00:00 2001 From: Joseph Viviano Date: Wed, 27 Mar 2024 14:59:27 -0400 Subject: [PATCH 08/13] black --- pyproject.toml | 2 +- src/gfn/containers/trajectories.py | 4 +--- src/gfn/gflownet/base.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 36947af0..16a2324a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ torch = ">=1.9.0" torchtyping = ">=0.1.4" # dev dependencies. -black = { version = "24.2", optional = true } +black = { version = "24.3", optional = true } flake8 = { version = "*", optional = true } gitmopy = { version = "*", optional = true } myst-parser = { version = "*", optional = true } diff --git a/src/gfn/containers/trajectories.py b/src/gfn/containers/trajectories.py index 4b2645ba..35196ec3 100644 --- a/src/gfn/containers/trajectories.py +++ b/src/gfn/containers/trajectories.py @@ -329,9 +329,7 @@ def to_transitions(self) -> Transitions: # Only return logprobs if they exist. 
log_probs = ( - self.log_probs[~self.actions.is_dummy] - if has_log_probs(self) - else None + self.log_probs[~self.actions.is_dummy] if has_log_probs(self) else None ) return Transitions( diff --git a/src/gfn/gflownet/base.py b/src/gfn/gflownet/base.py index 0624348a..8bb15fd9 100644 --- a/src/gfn/gflownet/base.py +++ b/src/gfn/gflownet/base.py @@ -96,7 +96,7 @@ def sample_trajectories( n_samples: int, save_logprobs: bool = True, save_estimator_outputs: bool = False, - **policy_kwargs + **policy_kwargs, ) -> Trajectories: """Samples trajectories, optionally with specified policy kwargs.""" sampler = Sampler(estimator=self.pf) From 8b2d12421108518bb8162020bcb57bcde08bf4fe Mon Sep 17 00:00:00 2001 From: saleml Date: Tue, 2 Apr 2024 11:58:54 +0400 Subject: [PATCH 09/13] remove unused variable --- src/gfn/env.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/gfn/env.py b/src/gfn/env.py index 510d3820..c1569235 100644 --- a/src/gfn/env.py +++ b/src/gfn/env.py @@ -393,7 +393,6 @@ class DiscreteEnvStates(DiscreteStates): def make_actions_class(self) -> type[Actions]: env = self - n_actions = self.n_actions class DiscreteEnvActions(Actions): action_shape = env.action_shape From 368af4c7ce449af86b5ae659fb7f36ea11b0c32b Mon Sep 17 00:00:00 2001 From: saleml Date: Tue, 2 Apr 2024 12:00:38 +0400 Subject: [PATCH 10/13] add back comment referring to issue 156 --- src/gfn/gflownet/detailed_balance.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/gfn/gflownet/detailed_balance.py b/src/gfn/gflownet/detailed_balance.py index 7e3ae86a..28cdd175 100644 --- a/src/gfn/gflownet/detailed_balance.py +++ b/src/gfn/gflownet/detailed_balance.py @@ -81,6 +81,9 @@ def get_scores( module_output = self.pf( states ) # TODO: Inefficient duplication in case of tempered policy + # The Transitions container should then have some + # estimator_outputs attribute as well, to avoid duplication here ? + # See (#156). valid_log_pf_actions = self.pf.to_probability_distribution( states, module_output ).log_prob(actions.tensor) From bff764df00e1ae4fb2ec74181ad2accfea8b698e Mon Sep 17 00:00:00 2001 From: saleml Date: Tue, 2 Apr 2024 12:01:45 +0400 Subject: [PATCH 11/13] change recalculate_all to recalculate_all_logprobs --- src/gfn/gflownet/base.py | 12 ++++++------ src/gfn/gflownet/detailed_balance.py | 12 ++++++------ src/gfn/gflownet/trajectory_balance.py | 8 ++++---- testing/test_parametrizations_and_losses.py | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/gfn/gflownet/base.py b/src/gfn/gflownet/base.py index 8bb15fd9..2711e09e 100644 --- a/src/gfn/gflownet/base.py +++ b/src/gfn/gflownet/base.py @@ -122,7 +122,7 @@ def get_pfs_and_pbs( self, trajectories: Trajectories, fill_value: float = 0.0, - recalculate_all: bool = False, + recalculate_all_logprobs: bool = False, ) -> Tuple[ TT["max_length", "n_trajectories", torch.float], TT["max_length", "n_trajectories", torch.float], @@ -132,7 +132,7 @@ def get_pfs_and_pbs( More specifically it evaluates $\log P_F (s' \mid s)$ and $\log P_B(s \mid s')$ for each transition in each trajectory in the batch. - Unless recalculate_all=True, in which case we re-evaluate the logprobs of the trajectories with + Unless recalculate_all_logprobs=True, in which case we re-evaluate the logprobs of the trajectories with the current self.pf. 
The following applies: - If trajectories have log_probs attribute, use them - this is usually for on-policy learning - Else, if trajectories have estimator_outputs attribute, transform them @@ -165,10 +165,10 @@ def get_pfs_and_pbs( if valid_states.batch_shape != tuple(valid_actions.batch_shape): raise AssertionError("Something wrong happening with log_pf evaluations") - if has_log_probs(trajectories) and not recalculate_all: + if has_log_probs(trajectories) and not recalculate_all_logprobs: log_pf_trajectories = trajectories.log_probs else: - if trajectories.estimator_outputs is not None and not recalculate_all: + if trajectories.estimator_outputs is not None and not recalculate_all_logprobs: estimator_outputs = trajectories.estimator_outputs[ ~trajectories.actions.is_dummy ] @@ -214,7 +214,7 @@ def get_pfs_and_pbs( def get_trajectories_scores( self, trajectories: Trajectories, - recalculate_all: bool = False, + recalculate_all_logprobs: bool = False, ) -> Tuple[ TT["n_trajectories", torch.float], TT["n_trajectories", torch.float], @@ -222,7 +222,7 @@ def get_trajectories_scores( ]: """Given a batch of trajectories, calculate forward & backward policy scores.""" log_pf_trajectories, log_pb_trajectories = self.get_pfs_and_pbs( - trajectories, recalculate_all=recalculate_all + trajectories, recalculate_all_logprobs=recalculate_all_logprobs ) assert log_pf_trajectories is not None diff --git a/src/gfn/gflownet/detailed_balance.py b/src/gfn/gflownet/detailed_balance.py index 28cdd175..3d97b1ad 100644 --- a/src/gfn/gflownet/detailed_balance.py +++ b/src/gfn/gflownet/detailed_balance.py @@ -42,7 +42,7 @@ def __init__( self.log_reward_clip_min = log_reward_clip_min def get_scores( - self, env: Env, transitions: Transitions, recalculate_all: bool = False + self, env: Env, transitions: Transitions, recalculate_all_logprobs: bool = False ) -> Tuple[ TT["n_transitions", float], TT["n_transitions", float], @@ -53,7 +53,7 @@ def get_scores( Args: transitions: a batch of transitions. - Unless recalculate_all=True, in which case we re-evaluate the logprobs of the transitions with + Unless recalculate_all_logprobs=True, in which case we re-evaluate the logprobs of the transitions with the current self.pf. The following applies: - If transitions have log_probs attribute, use them - this is usually for on-policy learning - Else, re-evaluate the log_probs using the current self.pf - this is usually for @@ -74,7 +74,7 @@ def get_scores( if states.batch_shape != tuple(actions.batch_shape): raise ValueError("Something wrong happening with log_pf evaluations") - if has_log_probs(transitions) and not recalculate_all: + if has_log_probs(transitions) and not recalculate_all_logprobs: valid_log_pf_actions = transitions.log_probs else: # Evaluate the log PF of the actions @@ -156,11 +156,11 @@ class ModifiedDBGFlowNet(PFBasedGFlowNet[Transitions]): """ def get_scores( - self, transitions: Transitions, recalculate_all: bool = False + self, transitions: Transitions, recalculate_all_logprobs: bool = False ) -> TT["n_trajectories", torch.float]: """DAG-GFN-style detailed balance, when all states are connected to the sink. - Unless recalculate_all=True, in which case we re-evaluate the logprobs of the transitions with + Unless recalculate_all_logprobs=True, in which case we re-evaluate the logprobs of the transitions with the current self.pf. 
The following applies: - If transitions have log_probs attribute, use them - this is usually for on-policy learning - Else, re-evaluate the log_probs using the current self.pf - this is usually for @@ -181,7 +181,7 @@ def get_scores( module_output = self.pf(states) pf_dist = self.pf.to_probability_distribution(states, module_output) - if has_log_probs(transitions) and not recalculate_all: + if has_log_probs(transitions) and not recalculate_all_logprobs: valid_log_pf_actions = transitions[mask].log_probs else: # Evaluate the log PF of the actions sampled off policy. diff --git a/src/gfn/gflownet/trajectory_balance.py b/src/gfn/gflownet/trajectory_balance.py index 45db346c..b4abf3a5 100644 --- a/src/gfn/gflownet/trajectory_balance.py +++ b/src/gfn/gflownet/trajectory_balance.py @@ -42,7 +42,7 @@ def __init__( self.log_reward_clip_min = log_reward_clip_min def loss( - self, env: Env, trajectories: Trajectories, recalculate_all: bool = False + self, env: Env, trajectories: Trajectories, recalculate_all_logprobs: bool = False ) -> TT[0, float]: """Trajectory balance loss. @@ -54,7 +54,7 @@ def loss( """ del env # unused _, _, scores = self.get_trajectories_scores( - trajectories, recalculate_all=recalculate_all + trajectories, recalculate_all_logprobs=recalculate_all_logprobs ) loss = (scores + self.logZ).pow(2).mean() if torch.isnan(loss): @@ -83,7 +83,7 @@ def __init__( self.log_reward_clip_min = log_reward_clip_min def loss( - self, env: Env, trajectories: Trajectories, recalculate_all: bool = False + self, env: Env, trajectories: Trajectories, recalculate_all_logprobs: bool = False ) -> TT[0, float]: """Log Partition Variance loss. @@ -92,7 +92,7 @@ def loss( """ del env # unused _, _, scores = self.get_trajectories_scores( - trajectories, recalculate_all=recalculate_all + trajectories, recalculate_all_logprobs=recalculate_all_logprobs ) loss = (scores - scores.mean()).pow(2).mean() if torch.isnan(loss): diff --git a/testing/test_parametrizations_and_losses.py b/testing/test_parametrizations_and_losses.py index 5904ddd4..95b69bc6 100644 --- a/testing/test_parametrizations_and_losses.py +++ b/testing/test_parametrizations_and_losses.py @@ -76,7 +76,7 @@ def test_get_pfs_and_pbs(env_name: str, preprocessor_name: str): log_pfs_on, log_pbs_on = gflownet_on.get_pfs_and_pbs(trajectories) log_pfs_off, log_pbs_off = gflownet_off.get_pfs_and_pbs( - trajectories, recalculate_all=True + trajectories, recalculate_all_logprobs=True ) @@ -92,7 +92,7 @@ def test_get_scores(env_name: str, preprocessor_name: str): gflownet_off = TBGFlowNet(pf=pf_estimator, pb=pb_estimator) scores_on = gflownet_on.get_trajectories_scores(trajectories) scores_off = gflownet_off.get_trajectories_scores( - trajectories, recalculate_all=True + trajectories, recalculate_all_logprobs=True ) assert all( [ From 89c72b5add431fcd8a787323cdc14aee7ee1ffe8 Mon Sep 17 00:00:00 2001 From: saleml Date: Tue, 2 Apr 2024 14:03:27 +0400 Subject: [PATCH 12/13] add forgotten keyword in train_line.py --- tutorials/examples/train_line.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tutorials/examples/train_line.py b/tutorials/examples/train_line.py index 0fb9db09..6ce7fde6 100644 --- a/tutorials/examples/train_line.py +++ b/tutorials/examples/train_line.py @@ -229,6 +229,7 @@ def train( env, n_samples=batch_size, save_estimator_outputs=True, + save_logprobs=False, scale_factor=scale_schedule[iteration], # Off policy kwargs. 
) training_samples = gflownet.to_training_samples(trajectories) From 9ae95a5911f72e004dace3d338a5e1b28a0781d8 Mon Sep 17 00:00:00 2001 From: Joseph Viviano Date: Tue, 2 Apr 2024 10:24:20 -0400 Subject: [PATCH 13/13] black --- src/gfn/gflownet/base.py | 5 ++++- src/gfn/gflownet/trajectory_balance.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/gfn/gflownet/base.py b/src/gfn/gflownet/base.py index 2711e09e..032639a2 100644 --- a/src/gfn/gflownet/base.py +++ b/src/gfn/gflownet/base.py @@ -168,7 +168,10 @@ def get_pfs_and_pbs( if has_log_probs(trajectories) and not recalculate_all_logprobs: log_pf_trajectories = trajectories.log_probs else: - if trajectories.estimator_outputs is not None and not recalculate_all_logprobs: + if ( + trajectories.estimator_outputs is not None + and not recalculate_all_logprobs + ): estimator_outputs = trajectories.estimator_outputs[ ~trajectories.actions.is_dummy ] diff --git a/src/gfn/gflownet/trajectory_balance.py b/src/gfn/gflownet/trajectory_balance.py index b4abf3a5..1f8799d9 100644 --- a/src/gfn/gflownet/trajectory_balance.py +++ b/src/gfn/gflownet/trajectory_balance.py @@ -42,7 +42,10 @@ def __init__( self.log_reward_clip_min = log_reward_clip_min def loss( - self, env: Env, trajectories: Trajectories, recalculate_all_logprobs: bool = False + self, + env: Env, + trajectories: Trajectories, + recalculate_all_logprobs: bool = False, ) -> TT[0, float]: """Trajectory balance loss. @@ -83,7 +86,10 @@ def __init__( self.log_reward_clip_min = log_reward_clip_min def loss( - self, env: Env, trajectories: Trajectories, recalculate_all_logprobs: bool = False + self, + env: Env, + trajectories: Trajectories, + recalculate_all_logprobs: bool = False, ) -> TT[0, float]: """Log Partition Variance loss.
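
---
Note: a sketch of the API these patches converge on, assuming the
tutorial-style setup (env, pf_estimator, pb_estimator) is in scope and that
TBGFlowNet is imported as in the notebooks; not taken verbatim from any
patch. The constructor-level off_policy flag is gone: callers decide at
sampling time what to store, and at loss time whether to recompute.

    gflownet = TBGFlowNet(pf=pf_estimator, pb=pb_estimator, init_logZ=0.0)

    # 1) On-policy: save log-probs while sampling; the loss reuses them.
    trajectories = gflownet.sample_trajectories(
        env, n_samples=16, save_logprobs=True
    )
    loss = gflownet.loss(env, trajectories)

    # 2) Off-policy (e.g. a tempered policy): save the raw estimator outputs
    #    instead, so the loss can rebuild log-probs under the default
    #    (un-tempered) policy.
    trajectories = gflownet.sample_trajectories(
        env, n_samples=16, save_logprobs=False, save_estimator_outputs=True
    )
    loss = gflownet.loss(env, trajectories)

    # 3) Replay buffer: stored quantities may be stale, so force
    #    recomputation of the log-probs with the current pf.
    loss = gflownet.loss(env, trajectories, recalculate_all_logprobs=True)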