From 3d6e8072e1e7915d9f8d89156aa11d2be176d41b Mon Sep 17 00:00:00 2001 From: Alec Helbling Date: Sun, 1 Jan 2023 23:24:59 -0500 Subject: [PATCH] Used Black to reformat the code in the repository. --- .gitignore | 5 +- examples/basic_neural_network.py | 15 +- examples/cnn/cnn.py | 22 +- .../code_snippet/image_nn_code_snippet.py | 25 +- examples/code_snippet/vae_code_landscape.py | 43 ++-- examples/code_snippet/vae_nn_code_snippet.py | 48 ++-- examples/disentanglement/disentanglement.py | 41 +++- examples/epsilon_nn_graph/epsilon_nn_graph.py | 70 ++++-- examples/gan/gan.py | 73 +++--- examples/interpolation/interpolation.py | 30 +-- examples/logo/logo.py | 12 +- examples/logo/website_logo.py | 7 +- .../oracle_guidance/oracle_guidance.py | 230 +++++++++--------- .../generate_disentanglement.py | 45 +++- .../autoencoder_models/generate_images.py | 9 +- .../generate_interpolation.py | 10 +- .../variational_autoencoder.py | 226 ++++++++++++----- .../variational_autoencoder.py | 27 +- manim_ml/decision_tree/decision_tree.py | 9 +- manim_ml/flow/flow.py | 7 +- manim_ml/gridded_rectangle.py | 55 +++-- manim_ml/image.py | 16 +- manim_ml/lazy_animation.py | 4 +- manim_ml/list_group.py | 9 +- manim_ml/manifold.py | 2 +- manim_ml/neural_network/layers/__init__.py | 10 +- .../neural_network/layers/convolutional2d.py | 23 +- .../convolutional2d_to_convolutional2d.py | 96 +++++--- .../neural_network/layers/convolutional3d.py | 58 +++-- .../convolutional3d_to_convolutional3d.py | 224 ++++++++--------- .../convolutional_3d_to_feed_forward.py | 27 +- manim_ml/neural_network/layers/embedding.py | 78 +++--- .../layers/embedding_to_feed_forward.py | 26 +- .../neural_network/layers/feed_forward.py | 46 +++- .../layers/feed_forward_to_embedding.py | 28 ++- .../layers/feed_forward_to_feed_forward.py | 57 +++-- .../layers/feed_forward_to_image.py | 33 ++- .../layers/feed_forward_to_vector.py | 33 ++- manim_ml/neural_network/layers/image.py | 11 +- .../layers/image_to_convolutional3d.py | 47 ++-- .../layers/image_to_feed_forward.py | 31 ++- .../neural_network/layers/paired_query.py | 22 +- .../layers/paired_query_to_feed_forward.py | 36 ++- .../neural_network/layers/parent_layers.py | 20 +- manim_ml/neural_network/layers/triplet.py | 46 ++-- .../layers/triplet_to_feed_forward.py | 43 +++- manim_ml/neural_network/layers/util.py | 12 +- manim_ml/neural_network/layers/vector.py | 8 +- manim_ml/neural_network/neural_network.py | 100 +++++--- .../neural_network_transformations.py | 125 ++++++---- .../neural_network/variational_autoencoder.py | 47 ++-- manim_ml/one_to_one_sync.py | 5 +- manim_ml/probability.py | 49 ++-- manim_ml/scene.py | 12 +- setup.py | 8 +- tests/.DS_Store | Bin 0 -> 8196 bytes tests/test_3d_camera_move.py | 15 +- tests/test_convolutional_2d_layer.py | 8 +- tests/test_convolutional_3d_layer.py | 51 ++-- tests/test_embedding_layer.py | 75 +++--- tests/test_feed_forward_thickness_change.py | 2 - tests/test_flow.py | 4 +- tests/test_image_homotopy.py | 55 +++++ tests/test_layers.py | 5 +- tests/test_neural_network.py | 117 ++++----- tests/test_nn_dropout.py | 39 +++ tests/test_opengl_shader.py | 3 +- tests/test_paired_query.py | 18 +- tests/test_surrounding_rectangle.py | 4 +- tests/test_triplet.py | 18 +- tests/test_variational_autoencoder.py | 29 ++- 71 files changed, 1705 insertions(+), 1139 deletions(-) create mode 100644 tests/.DS_Store create mode 100644 tests/test_image_homotopy.py create mode 100644 tests/test_nn_dropout.py diff --git a/.gitignore b/.gitignore index 0bfd98c..f032d79 100644 --- 
a/.gitignore +++ b/.gitignore @@ -6,4 +6,7 @@ manim_ml/media media pyproject.toml setup.cfg -!examples/media \ No newline at end of file +!examples/media +examples/media/videos +examples/media/text +examples/media/images \ No newline at end of file diff --git a/examples/basic_neural_network.py b/examples/basic_neural_network.py index cc0a1ea..8217c31 100644 --- a/examples/basic_neural_network.py +++ b/examples/basic_neural_network.py @@ -2,22 +2,21 @@ from manim_ml.neural_network.layers import FeedForwardLayer from manim_ml.neural_network.neural_network import NeuralNetwork + class NeuralNetworkScene(Scene): """Test Scene for the Neural Network""" def construct(self): # Make the Layer object - layers = [ - FeedForwardLayer(3), - FeedForwardLayer(5), - FeedForwardLayer(3) - ] + layers = [FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3)] nn = NeuralNetwork(layers) nn.scale(2) nn.move_to(ORIGIN) # Make Animation self.add(nn) - #self.play(Create(nn)) - forward_propagation_animation = nn.make_forward_pass_animation(run_time=5, passing_flash=True) + # self.play(Create(nn)) + forward_propagation_animation = nn.make_forward_pass_animation( + run_time=5, passing_flash=True + ) - self.play(forward_propagation_animation) \ No newline at end of file + self.play(forward_propagation_animation) diff --git a/examples/cnn/cnn.py b/examples/cnn/cnn.py index 35601fc..577069f 100644 --- a/examples/cnn/cnn.py +++ b/examples/cnn/cnn.py @@ -15,6 +15,7 @@ config.frame_width = 7.0 ROOT_DIR = Path(__file__).parents[2] + def make_code_snippet(): code_str = """ # Make nn @@ -31,32 +32,34 @@ def make_code_snippet(): """ code = Code( - code = code_str, + code=code_str, tab_width=4, background_stroke_width=1, background_stroke_color=WHITE, insert_line_no=False, - style='monokai', - #background="window", + style="monokai", + # background="window", language="py", ) code.scale(0.50) return code + class CombinedScene(ThreeDScene): def construct(self): - image = Image.open(ROOT_DIR / 'assets/mnist/digit.jpeg') + image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional3DLayer(1, 7, 7, 3, 3, filter_spacing=0.32), Convolutional3DLayer(3, 5, 5, 3, 3, filter_spacing=0.32), Convolutional3DLayer(5, 3, 3, 1, 1, filter_spacing=0.18), FeedForwardLayer(3), FeedForwardLayer(3), - ], + ], layer_spacing=0.25, ) # Center the nn @@ -71,10 +74,7 @@ def construct(self): group.move_to(ORIGIN) # Play animation forward_pass = nn.make_forward_pass_animation( - corner_pulses=False, - all_filters_at_once=False + corner_pulses=False, all_filters_at_once=False ) self.wait(1) - self.play( - forward_pass - ) + self.play(forward_pass) diff --git a/examples/code_snippet/image_nn_code_snippet.py b/examples/code_snippet/image_nn_code_snippet.py index 38d400f..c2812a4 100644 --- a/examples/code_snippet/image_nn_code_snippet.py +++ b/examples/code_snippet/image_nn_code_snippet.py @@ -9,8 +9,8 @@ config.frame_height = 6.0 config.frame_width = 6.0 -class ImageNeuralNetworkScene(Scene): +class ImageNeuralNetworkScene(Scene): def make_code_snippet(self): code_str = """ # Make image object @@ -32,13 +32,13 @@ def make_code_snippet(self): """ code = Code( - code = code_str, + code=code_str, tab_width=4, background_stroke_width=1, background_stroke_color=WHITE, insert_line_no=False, - style='monokai', - #background="window", + style="monokai", + # background="window", language="py", ) code.scale(0.2) @@ -46,22 
+46,22 @@ def make_code_snippet(self): return code def construct(self): - image = Image.open('../../tests/images/image.jpeg') + image = Image.open("../../tests/images/image.jpeg") numpy_image = np.asarray(image) # Make nn layers = [ ImageLayer(numpy_image, height=1.4), - FeedForwardLayer(3), + FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3), - FeedForwardLayer(6) + FeedForwardLayer(6), ] nn = NeuralNetwork(layers) nn.scale(0.9) # Center the nn nn.move_to(ORIGIN) - nn.rotate(-PI/2) - nn.layers[0].image_mobject.rotate(PI/2) + nn.rotate(-PI / 2) + nn.layers[0].image_mobject.rotate(PI / 2) nn.layers[0].image_mobject.shift([0, -0.4, 0]) nn.shift([1.5, 0.3, 0]) self.add(nn) @@ -71,15 +71,14 @@ def construct(self): code_snippet.shift([-1.25, 0, 0]) self.add(code_snippet) # Play animation - self.play( - nn.make_forward_pass_animation(run_time=10) - ) + self.play(nn.make_forward_pass_animation(run_time=10)) + if __name__ == "__main__": """Render all scenes""" # Feed Forward Neural Network ffnn_scene = FeedForwardNeuralNetworkScene() ffnn_scene.render() - # Neural Network + # Neural Network nn_scene = NeuralNetworkScene() nn_scene.render() diff --git a/examples/code_snippet/vae_code_landscape.py b/examples/code_snippet/vae_code_landscape.py index 82e7215..84d35cc 100644 --- a/examples/code_snippet/vae_code_landscape.py +++ b/examples/code_snippet/vae_code_landscape.py @@ -9,8 +9,8 @@ config.frame_height = 6.0 config.frame_width = 6.0 -class VAECodeSnippetScene(Scene): +class VAECodeSnippetScene(Scene): def make_code_snippet(self): code_str = """ # Make Neural Network @@ -28,33 +28,38 @@ def make_code_snippet(self): """ code = Code( - code = code_str, + code=code_str, tab_width=4, background_stroke_width=1, # background_stroke_color=WHITE, insert_line_no=False, background="window", # font="Monospace", - style='one-dark', + style="one-dark", language="py", ) return code def construct(self): - image = Image.open('../../tests/images/image.jpeg') + image = Image.open("../../tests/images/image.jpeg") numpy_image = np.asarray(image) - embedding_layer = EmbeddingLayer(dist_theme="ellipse", point_radius=0.04).scale(1.0) + embedding_layer = EmbeddingLayer(dist_theme="ellipse", point_radius=0.04).scale( + 1.0 + ) # Make nn - nn = NeuralNetwork([ - ImageLayer(numpy_image, height=1.2), - FeedForwardLayer(5), - FeedForwardLayer(3), - embedding_layer, - FeedForwardLayer(3), - FeedForwardLayer(5), - ImageLayer(numpy_image, height=1.2), - ], layer_spacing=0.1) + nn = NeuralNetwork( + [ + ImageLayer(numpy_image, height=1.2), + FeedForwardLayer(5), + FeedForwardLayer(3), + embedding_layer, + FeedForwardLayer(3), + FeedForwardLayer(5), + ImageLayer(numpy_image, height=1.2), + ], + layer_spacing=0.1, + ) nn.scale(1.1) # Center the nn @@ -73,13 +78,11 @@ def construct(self): # code_snippet.shift([-1.25, 0, 0]) self.add(code_snippet) # Play animation - self.play( - nn.make_forward_pass_animation(), - run_time=10 - ) - + self.play(nn.make_forward_pass_animation(), run_time=10) + + if __name__ == "__main__": """Render all scenes""" - # Neural Network + # Neural Network nn_scene = VAECodeSnippetScene() nn_scene.render() diff --git a/examples/code_snippet/vae_nn_code_snippet.py b/examples/code_snippet/vae_nn_code_snippet.py index fa9d67b..6529d78 100644 --- a/examples/code_snippet/vae_nn_code_snippet.py +++ b/examples/code_snippet/vae_nn_code_snippet.py @@ -9,8 +9,8 @@ config.frame_height = 6.0 config.frame_width = 6.0 -class VAECodeSnippetScene(Scene): +class VAECodeSnippetScene(Scene): def 
make_code_snippet(self): code_str = """ # Make image object @@ -34,14 +34,14 @@ def make_code_snippet(self): """ code = Code( - code = code_str, + code=code_str, tab_width=4, background_stroke_width=1, # background_stroke_color=WHITE, insert_line_no=False, background="window", # font="Monospace", - style='one-dark', + style="one-dark", language="py", ) code.scale(0.2) @@ -49,27 +49,32 @@ def make_code_snippet(self): return code def construct(self): - image = Image.open('../../tests/images/image.jpeg') + image = Image.open("../../tests/images/image.jpeg") numpy_image = np.asarray(image) - embedding_layer = EmbeddingLayer(dist_theme="ellipse", point_radius=0.04).scale(1.0) + embedding_layer = EmbeddingLayer(dist_theme="ellipse", point_radius=0.04).scale( + 1.0 + ) # Make nn - nn = NeuralNetwork([ - ImageLayer(numpy_image, height=1.0), - FeedForwardLayer(5), - FeedForwardLayer(3), - embedding_layer, - FeedForwardLayer(3), - FeedForwardLayer(5), - ImageLayer(numpy_image, height=1.0), - ], layer_spacing=0.1) + nn = NeuralNetwork( + [ + ImageLayer(numpy_image, height=1.0), + FeedForwardLayer(5), + FeedForwardLayer(3), + embedding_layer, + FeedForwardLayer(3), + FeedForwardLayer(5), + ImageLayer(numpy_image, height=1.0), + ], + layer_spacing=0.1, + ) nn.scale(0.65) # Center the nn nn.move_to(ORIGIN) - nn.rotate(-PI/2) - nn.all_layers[0].image_mobject.rotate(PI/2) + nn.rotate(-PI / 2) + nn.all_layers[0].image_mobject.rotate(PI / 2) # nn.all_layers[0].image_mobject.shift([0, -0.4, 0]) - nn.all_layers[-1].image_mobject.rotate(PI/2) + nn.all_layers[-1].image_mobject.rotate(PI / 2) # nn.all_layers[-1].image_mobject.shift([0, -0.4, 0]) nn.shift([1.5, 0.0, 0]) self.add(nn) @@ -79,14 +84,11 @@ def construct(self): code_snippet.shift([-1.25, 0, 0]) self.add(code_snippet) # Play animation - self.play( - nn.make_forward_pass_animation(), - run_time=10 - ) - + self.play(nn.make_forward_pass_animation(), run_time=10) + if __name__ == "__main__": """Render all scenes""" - # Neural Network + # Neural Network nn_scene = VAECodeSnippetScene() nn_scene.render() diff --git a/examples/disentanglement/disentanglement.py b/examples/disentanglement/disentanglement.py index d275e92..75b3785 100644 --- a/examples/disentanglement/disentanglement.py +++ b/examples/disentanglement/disentanglement.py @@ -23,16 +23,24 @@ def construct_image_mobject(input_image, height=2.3): return image_mobject -class DisentanglementVisualization(VGroup): - def __init__(self, model_path=ROOT_DIR / "examples/variational_autoencoder/autoencoder_models/saved_models/model_dim2.pth", image_height=0.35): +class DisentanglementVisualization(VGroup): + def __init__( + self, + model_path=ROOT_DIR + / "examples/variational_autoencoder/autoencoder_models/saved_models/model_dim2.pth", + image_height=0.35, + ): self.model_path = model_path self.image_height = image_height # Load disentanglement image objects - with open(ROOT_DIR/ "examples/variational_autoencoder/autoencoder_models/disentanglement.pkl", "rb") as f: + with open( + ROOT_DIR + / "examples/variational_autoencoder/autoencoder_models/disentanglement.pkl", + "rb", + ) as f: self.image_handler = pickle.load(f) - def make_disentanglement_generation_animation(self): animation_list = [] for image_index, image in enumerate(self.image_handler["images"]): @@ -41,18 +49,24 @@ def make_disentanglement_generation_animation(self): # Move the image to the correct location r_offset = -1.2 c_offset = 0.25 - image_location = [c_offset + c*self.image_height, r_offset + r*self.image_height, 0] + image_location = 
[ + c_offset + c * self.image_height, + r_offset + r * self.image_height, + 0, + ] image_mobject.move_to(image_location) animation_list.append(FadeIn(image_mobject)) generation_animation = AnimationGroup(*animation_list[::-1], lag_ratio=1.0) return generation_animation + config.pixel_height = 720 config.pixel_width = 1280 config.frame_height = 5.0 config.frame_width = 5.0 + class DisentanglementScene(Scene): """Disentanglement Scene Object""" @@ -76,7 +90,7 @@ def _construct_embedding(self, point_color=BLUE, dot_radius=0.05): self.point_dots = VGroup() for point in points: point_location = embedding.axes.coords_to_point(*point) - dot = Dot(point_location, color=point_color, radius=dot_radius/2) + dot = Dot(point_location, color=point_color, radius=dot_radius / 2) self.point_dots.add(dot) embedding.add(self.point_dots) @@ -84,10 +98,13 @@ def _construct_embedding(self, point_color=BLUE, dot_radius=0.05): def construct(self): # Make the VAE decoder - vae_decoder = NeuralNetwork([ - FeedForwardLayer(3), - FeedForwardLayer(5), - ], layer_spacing=0.55) + vae_decoder = NeuralNetwork( + [ + FeedForwardLayer(3), + FeedForwardLayer(5), + ], + layer_spacing=0.55, + ) vae_decoder.shift([-0.55, 0, 0]) self.play(Create(vae_decoder), run_time=1) @@ -99,6 +116,8 @@ def construct(self): self.play(Create(embedding)) # Make disentanglment visualization disentanglement = DisentanglementVisualization() - disentanglement_animation = disentanglement.make_disentanglement_generation_animation() + disentanglement_animation = ( + disentanglement.make_disentanglement_generation_animation() + ) self.play(disentanglement_animation, run_time=3) self.play(Wait(2)) diff --git a/examples/epsilon_nn_graph/epsilon_nn_graph.py b/examples/epsilon_nn_graph/epsilon_nn_graph.py index c58d5a9..3712d1c 100644 --- a/examples/epsilon_nn_graph/epsilon_nn_graph.py +++ b/examples/epsilon_nn_graph/epsilon_nn_graph.py @@ -13,6 +13,7 @@ config.frame_height = 12.0 config.frame_width = 12.0 + def make_moon_points(num_samples=100, noise=0.1, random_seed=1): """Make two half moon point shapes""" # Make sure the points are normalized @@ -24,17 +25,19 @@ def make_moon_points(num_samples=100, noise=0.1, random_seed=1): return X + def make_epsilon_balls(epsilon_value, points, axes, ball_color=RED, opacity=0.0): - """Draws epsilon balls """ + """Draws epsilon balls""" balls = [] for point in points: ball = Circle(epsilon_value, color=ball_color, fill_opacity=opacity) global_location = axes.coords_to_point(*point) ball.move_to(global_location) balls.append(ball) - + return VGroup(*balls) + def make_epsilon_graph(epsilon_value, dots, points, edge_color=ORANGE): """Makes an epsilon nearest neighbor graph for the given dots""" # First compute the adjacency matrix from the epsilon value and the points @@ -46,7 +49,7 @@ def make_epsilon_graph(epsilon_value, dots, points, edge_color=ORANGE): dist = np.linalg.norm(dots[i].get_center() - dots[j].get_center()) is_connected = 1 if dist < epsilon_value else 0 adjacency_matrix[i, j] = is_connected - # Draw a graph based on the adjacency matrix + # Draw a graph based on the adjacency matrix edges = [] for i in range(num_dots): for j in range(i): @@ -56,28 +59,28 @@ def make_epsilon_graph(epsilon_value, dots, points, edge_color=ORANGE): dot_a = dots[i] dot_b = dots[j] edge = Line( - dot_a.get_center(), - dot_b.get_center(), + dot_a.get_center(), + dot_b.get_center(), color=edge_color, - stroke_width=3 + stroke_width=3, ) edges.append(edge) return VGroup(*edges), adjacency_matrix + def 
perform_spectral_clustering(adjacency_matrix): """Performs spectral clustering given adjacency matrix""" clustering = SpectralClustering( - n_clusters=2, - affinity="precomputed", - random_state=0 + n_clusters=2, affinity="precomputed", random_state=0 ).fit(adjacency_matrix) labels = clustering.labels_ return labels + def make_color_change_animation(labels, dots, colors=[ORANGE, GREEN]): - """Makes a color change animation """ + """Makes a color change animation""" anims = [] for index in range(len(labels)): @@ -86,21 +89,30 @@ def make_color_change_animation(labels, dots, colors=[ORANGE, GREEN]): anims.append(dot.animate.set_color(color)) return AnimationGroup(*anims, lag_ratio=0.0) - -class EpsilonNearestNeighborScene(Scene): - def construct(self, num_points=200, dot_radius=0.1, - dot_color=BLUE, ball_color=WHITE, noise=0.1, ball_opacity=0.0, - random_seed=2): + +class EpsilonNearestNeighborScene(Scene): + def construct( + self, + num_points=200, + dot_radius=0.1, + dot_color=BLUE, + ball_color=WHITE, + noise=0.1, + ball_opacity=0.0, + random_seed=2, + ): # Make moon shape points # Note: dot is the drawing object and point is the math concept - moon_points = make_moon_points(num_samples=num_points, noise=noise, random_seed=random_seed) + moon_points = make_moon_points( + num_samples=num_points, noise=noise, random_seed=random_seed + ) # Make an axes axes = Axes( x_range=[-6, 6, 1], y_range=[-6, 6, 1], - x_length=12, - y_length=12, + x_length=12, + y_length=12, tips=False, axis_config={"stroke_color": "#000000"}, ) @@ -116,8 +128,10 @@ def construct(self, num_points=200, dot_radius=0.1, dots = VGroup(*dots) self.play(Create(dots)) # Draw epsilon bar with initial value - epsilon_bar = NumberLine([0, 2], length=8, stroke_width=2, include_ticks=False, include_numbers=False) - epsilon_bar.shift(4.5*DOWN) + epsilon_bar = NumberLine( + [0, 2], length=8, stroke_width=2, include_ticks=False, include_numbers=False + ) + epsilon_bar.shift(4.5 * DOWN) self.play(Create(epsilon_bar)) current_epsilon = ValueTracker(0.3) epsilon_point = epsilon_bar.number_to_point(current_epsilon.get_value()) @@ -128,7 +142,7 @@ def construct(self, num_points=200, dot_radius=0.1, label_text = MathTex("\epsilon").scale(1.5) # label_text = Text("Epsilon") label_text.move_to(epsilon_bar.get_center()) - label_text.shift(DOWN*0.5) + label_text.shift(DOWN * 0.5) self.add(label_text) # Make an updater for the dot def dot_updater(epsilon_dot): @@ -139,12 +153,17 @@ def dot_updater(epsilon_dot): epsilon_dot.add_updater(dot_updater) # Make the epsilon balls epsilon_balls = make_epsilon_balls( - current_epsilon.get_value(), moon_points, axes, ball_color=ball_color, opacity=ball_opacity + current_epsilon.get_value(), + moon_points, + axes, + ball_color=ball_color, + opacity=ball_opacity, ) # Set up updater for radius of balls def epsilon_balls_updater(epsilon_balls): for ball in epsilon_balls: ball.set_width(current_epsilon.get_value()) + # Turn epsilon up and down epsilon_balls.add_updater(epsilon_balls_updater) # Fade in the initial balls @@ -156,10 +175,7 @@ def epsilon_balls_updater(epsilon_balls): epsilon_value = 0.9 # Show connecting graph epsilon_graph, adjacency_matrix = make_epsilon_graph( - current_epsilon.get_value(), - dots, - moon_points, - edge_color=WHITE + current_epsilon.get_value(), dots, moon_points, edge_color=WHITE ) self.play(FadeOut(epsilon_balls)) self.play(FadeIn(epsilon_graph)) @@ -172,4 +188,4 @@ def epsilon_balls_updater(epsilon_balls): self.play(color_change_animation) # Fade out graph edges 
self.play(FadeOut(epsilon_graph)) - self.play(Wait(5.0)) \ No newline at end of file + self.play(Wait(5.0)) diff --git a/examples/gan/gan.py b/examples/gan/gan.py index d60c648..b27ecd8 100644 --- a/examples/gan/gan.py +++ b/examples/gan/gan.py @@ -17,9 +17,10 @@ config.frame_height = 8.3 config.frame_width = 8.3 + class GAN(Mobject): """Generative Adversarial Network""" - + def __init__(self): super().__init__() self.make_entities() @@ -29,27 +30,35 @@ def __init__(self): def make_entities(self, image_height=1.2): """Makes all of the network entities""" # Make the fake image layer - default_image = Image.open(ROOT_DIR / 'assets/gan/fake_image.png') + default_image = Image.open(ROOT_DIR / "assets/gan/fake_image.png") numpy_image = np.asarray(default_image) - self.fake_image_layer = ImageLayer(numpy_image, height=image_height, show_image_on_create=False) + self.fake_image_layer = ImageLayer( + numpy_image, height=image_height, show_image_on_create=False + ) # Make the Generator Network - self.generator = NeuralNetwork([ - EmbeddingLayer(covariance=np.array([[3.0, 0], [0, 3.0]])).scale(1.3), - FeedForwardLayer(3), - FeedForwardLayer(5), - self.fake_image_layer - ], layer_spacing=0.1) + self.generator = NeuralNetwork( + [ + EmbeddingLayer(covariance=np.array([[3.0, 0], [0, 3.0]])).scale(1.3), + FeedForwardLayer(3), + FeedForwardLayer(5), + self.fake_image_layer, + ], + layer_spacing=0.1, + ) self.add(self.generator) # Make the Discriminator - self.discriminator = NeuralNetwork([ - FeedForwardLayer(5), - FeedForwardLayer(1), - VectorLayer(1, value_func=lambda: random.uniform(0, 1)), - ], layer_spacing=0.1) + self.discriminator = NeuralNetwork( + [ + FeedForwardLayer(5), + FeedForwardLayer(1), + VectorLayer(1, value_func=lambda: random.uniform(0, 1)), + ], + layer_spacing=0.1, + ) self.add(self.discriminator) # Make Ground Truth Dataset - default_image = Image.open(ROOT_DIR / 'assets/gan/real_image.jpg') + default_image = Image.open(ROOT_DIR / "assets/gan/real_image.jpg") numpy_image = np.asarray(default_image) self.ground_truth_layer = ImageLayer(numpy_image, height=image_height) self.add(self.ground_truth_layer) @@ -90,7 +99,7 @@ def make_titles(self): self.probability_title = Text("Probability").scale(0.5) self.probability_title.move_to(self.discriminator.input_layers[-2]) self.probability_title.shift(UP) - self.probability_title.shift(RIGHT*1.05) + self.probability_title.shift(RIGHT * 1.05) titles.add(self.probability_title) return titles @@ -99,16 +108,10 @@ def make_highlight_generator_rectangle(self): """Returns animation that highlights the generators contents""" group = VGroup() - generator_surrounding_group = Group( - self.generator, - self.fake_image_layer_title - ) + generator_surrounding_group = Group(self.generator, self.fake_image_layer_title) generator_surrounding_rectangle = SurroundingRectangle( - generator_surrounding_group, - buff=0.1, - stroke_width=4.0, - color="#0FFF50" + generator_surrounding_group, buff=0.1, stroke_width=4.0, color="#0FFF50" ) group.add(generator_surrounding_rectangle) title = Text("Generator").scale(0.5) @@ -124,16 +127,13 @@ def make_highlight_discriminator_rectangle(self): self.fake_image_layer, self.ground_truth_layer, self.fake_image_layer_title, - self.probability_title + self.probability_title, ) group = VGroup() discriminator_surrounding_rectangle = SurroundingRectangle( - discriminator_group, - buff=0.05, - stroke_width=4.0, - color="#0FFF50" + discriminator_group, buff=0.05, stroke_width=4.0, color="#0FFF50" ) 
group.add(discriminator_surrounding_rectangle) title = Text("Discriminator").scale(0.5) @@ -144,7 +144,7 @@ def make_highlight_discriminator_rectangle(self): def make_generator_forward_pass(self): """Makes forward pass of the generator""" - + forward_pass = self.generator.make_forward_pass_animation(dist_theme="ellipse") return forward_pass @@ -153,7 +153,7 @@ def make_discriminator_forward_pass(self): """Makes forward pass of the discriminator""" disc_forward = self.discriminator.make_forward_pass_animation() - + return disc_forward @override_animation(Create) @@ -163,18 +163,19 @@ def _create_override(self): Create(self.generator), Create(self.discriminator), Create(self.ground_truth_layer), - Create(self.titles) + Create(self.titles), ) return animation_group + class GANScene(Scene): """GAN Scene""" def construct(self): gan = GAN().scale(1.70) gan.move_to(ORIGIN) - gan.shift(DOWN*0.35) - gan.shift(LEFT*0.1) + gan.shift(DOWN * 0.35) + gan.shift(LEFT * 0.1) self.play(Create(gan), run_time=3) # Highlight generator highlight_generator_rectangle = gan.make_highlight_generator_rectangle() @@ -184,11 +185,11 @@ def construct(self): self.play(gen_forward_pass, run_time=5) # Fade out generator highlight self.play(Uncreate(highlight_generator_rectangle), run_time=1) - # Highlight discriminator + # Highlight discriminator highlight_discriminator_rectangle = gan.make_highlight_discriminator_rectangle() self.play(Create(highlight_discriminator_rectangle), run_time=1) # Discriminator forward pass discriminator_forward_pass = gan.make_discriminator_forward_pass() self.play(discriminator_forward_pass, run_time=5) # Unhighlight discriminator - self.play(Uncreate(highlight_discriminator_rectangle), run_time=1) \ No newline at end of file + self.play(Uncreate(highlight_discriminator_rectangle), run_time=1) diff --git a/examples/interpolation/interpolation.py b/examples/interpolation/interpolation.py index 9928e87..4c347d4 100644 --- a/examples/interpolation/interpolation.py +++ b/examples/interpolation/interpolation.py @@ -1,7 +1,7 @@ - """Visualization of VAE Interpolation""" import sys import os + sys.path.append(os.environ["PROJECT_ROOT"]) from manim import * import pickle @@ -12,22 +12,25 @@ """ The VAE Scene for the twitter video. 
""" -config.pixel_height = 720 -config.pixel_width = 1280 +config.pixel_height = 720 +config.pixel_width = 1280 config.frame_height = 6.0 config.frame_width = 6.0 # Set random seed so point distribution is constant np.random.seed(1) + class InterpolationScene(MovingCameraScene): """Scene object for a Variational Autoencoder and Autoencoder""" def construct(self): # Set Scene config - vae = variational_autoencoder.VariationalAutoencoder(dot_radius=0.035, layer_spacing=0.5) + vae = variational_autoencoder.VariationalAutoencoder( + dot_radius=0.035, layer_spacing=0.5 + ) vae.move_to(ORIGIN) - vae.encoder.shift(LEFT*0.5) - vae.decoder.shift(RIGHT*0.5) + vae.encoder.shift(LEFT * 0.5) + vae.decoder.shift(RIGHT * 0.5) mnist_image_handler = variational_autoencoder.MNISTImageHandler() image_pair = mnist_image_handler.image_pairs[3] # Make forward pass animation and DO NOT run it @@ -35,23 +38,22 @@ def construct(self): # Make the interpolation animation interpolation_images = mnist_image_handler.interpolation_images interpolation_animation = vae.make_interpolation_animation(interpolation_images) - embedding_zoom_animation = self.camera.auto_zoom([ - vae.embedding, - vae.decoder, - vae.output_image - ], margin=0.5) + embedding_zoom_animation = self.camera.auto_zoom( + [vae.embedding, vae.decoder, vae.output_image], margin=0.5 + ) # Make animations forward_pass_animations = [] for i in range(7): anim = vae.decoder.make_forward_propagation_animation(run_time=0.5) forward_pass_animations.append(anim) - forward_pass_animation_group = AnimationGroup(*forward_pass_animations, lag_ratio=1.0) + forward_pass_animation_group = AnimationGroup( + *forward_pass_animations, lag_ratio=1.0 + ) # Make forward pass animations self.play(Create(vae), run_time=1.5) self.play(FadeOut(vae.encoder), run_time=1.0) self.play(embedding_zoom_animation, run_time=1.5) interpolation_animation = AnimationGroup( - forward_pass_animation_group, - interpolation_animation + forward_pass_animation_group, interpolation_animation ) self.play(interpolation_animation, run_time=9.0) diff --git a/examples/logo/logo.py b/examples/logo/logo.py index 7e3d6fc..e0c5575 100644 --- a/examples/logo/logo.py +++ b/examples/logo/logo.py @@ -9,16 +9,18 @@ config.frame_height = 4.0 config.frame_width = 4.0 -class ManimMLLogo(Scene): +class ManimMLLogo(Scene): def construct(self): self.text = Text("ManimML") self.text.scale(1.0) - self.neural_network = FeedForwardNeuralNetwork([3, 5, 3, 6, 3], layer_spacing=0.3, node_color=BLUE) + self.neural_network = FeedForwardNeuralNetwork( + [3, 5, 3, 6, 3], layer_spacing=0.3, node_color=BLUE + ) self.neural_network.scale(1.0) self.neural_network.move_to(self.text.get_bottom()) self.neural_network.shift(1.25 * DOWN) - self.logo_group = Group(self.text, self.neural_network) + self.logo_group = Group(self.text, self.neural_network) self.logo_group.scale(1.0) self.logo_group.move_to(ORIGIN) self.play(Write(self.text)) @@ -28,14 +30,14 @@ def construct(self): animation_group = AnimationGroup( self.neural_network.make_forward_pass_animation(run_time=5), Create(underline), - # Create(self.surrounding_rectangle) + # Create(self.surrounding_rectangle) ) # self.surrounding_rectangle = SurroundingRectangle(self.logo_group, buff=0.3, color=BLUE) underline = Underline(self.text, color=BLUE) animation_group = AnimationGroup( self.neural_network.make_forward_pass_animation(run_time=5), Create(underline), - # Create(self.surrounding_rectangle) + # Create(self.surrounding_rectangle) ) self.play(animation_group) self.wait(5) diff 
--git a/examples/logo/website_logo.py b/examples/logo/website_logo.py index b9f129b..2215817 100644 --- a/examples/logo/website_logo.py +++ b/examples/logo/website_logo.py @@ -9,11 +9,12 @@ config.frame_height = 8.0 config.frame_width = 10.0 -class ManimMLLogo(Scene): +class ManimMLLogo(Scene): def construct(self): - self.neural_network = FeedForwardNeuralNetwork([3, 5, 3, 5], layer_spacing=0.6, node_color=BLUE, - edge_width=6) + self.neural_network = FeedForwardNeuralNetwork( + [3, 5, 3, 5], layer_spacing=0.6, node_color=BLUE, edge_width=6 + ) self.neural_network.scale(3) self.neural_network.move_to(ORIGIN) self.play(Create(self.neural_network)) diff --git a/examples/paper_visualizations/oracle_guidance/oracle_guidance.py b/examples/paper_visualizations/oracle_guidance/oracle_guidance.py index 1f10467..3b658c4 100644 --- a/examples/paper_visualizations/oracle_guidance/oracle_guidance.py +++ b/examples/paper_visualizations/oracle_guidance/oracle_guidance.py @@ -23,19 +23,20 @@ ROOT_DIR = Path(__file__).parents[3] -class Localizer(): + +class Localizer: """ - Holds the localizer object, which contains the queries, images, etc. - needed to represent a localization run. + Holds the localizer object, which contains the queries, images, etc. + needed to represent a localization run. """ - + def __init__(self, axes): - # Set dummy values for these + # Set dummy values for these self.index = -1 self.axes = axes self.num_queries = 3 self.assets_path = ROOT_DIR / "assets/oracle_guidance" - self.ground_truth_image_path = self.assets_path / "ground_truth.jpg" + self.ground_truth_image_path = self.assets_path / "ground_truth.jpg" self.ground_truth_location = np.array([2, 3]) # Prior distribution print("initial gaussian") @@ -45,52 +46,61 @@ def __init__(self, axes): cov=np.array([[3, 0], [0, 3]]), dist_theme="ellipse", color=GREEN, - ) + ) # Define the query images and embedded locations # Contains image paths [(positive_path, negative_path), ...] self.query_image_paths = [ - (os.path.join(self.assets_path, "positive_1.jpg"), os.path.join(self.assets_path, "negative_1.jpg")), - (os.path.join(self.assets_path, "positive_2.jpg"), os.path.join(self.assets_path, "negative_2.jpg")), - (os.path.join(self.assets_path, "positive_3.jpg"), os.path.join(self.assets_path, "negative_3.jpg")), - ] + ( + os.path.join(self.assets_path, "positive_1.jpg"), + os.path.join(self.assets_path, "negative_1.jpg"), + ), + ( + os.path.join(self.assets_path, "positive_2.jpg"), + os.path.join(self.assets_path, "negative_2.jpg"), + ), + ( + os.path.join(self.assets_path, "positive_3.jpg"), + os.path.join(self.assets_path, "negative_3.jpg"), + ), + ] # Contains 2D locations for each image [([2, 3], [2, 4]), ...] 
self.query_locations = [ (np.array([-1, -1]), np.array([1, 1])), - (np.array([1, -1]), np.array([-1, 1])), + (np.array([1, -1]), np.array([-1, 1])), (np.array([0.3, -0.6]), np.array([-0.5, 0.7])), - ] + ] # Make the covariances for each query self.query_covariances = [ (np.array([[0.3, 0], [0.0, 0.2]]), np.array([[0.2, 0], [0.0, 0.2]])), - (np.array([[0.2, 0], [0.0, 0.2]]), np.array([[0.2, 0], [0.0, 0.2]])), (np.array([[0.2, 0], [0.0, 0.2]]), np.array([[0.2, 0], [0.0, 0.2]])), - ] + (np.array([[0.2, 0], [0.0, 0.2]]), np.array([[0.2, 0], [0.0, 0.2]])), + ] # Posterior distributions over time GaussianDistribution objects self.posterior_distributions = [ GaussianDistribution( - self.axes, - dist_theme="ellipse", + self.axes, + dist_theme="ellipse", color=GREEN, mean=np.array([-0.3, -0.3]), - cov=np.array([[5, -4], [-4, 6]]) + cov=np.array([[5, -4], [-4, 6]]), ).scale(0.6), GaussianDistribution( - self.axes, - dist_theme="ellipse", + self.axes, + dist_theme="ellipse", color=GREEN, mean=np.array([0.25, -0.25]), - cov=np.array([[3, -2], [-2, 4]]) + cov=np.array([[3, -2], [-2, 4]]), ).scale(0.35), GaussianDistribution( - self.axes, - dist_theme="ellipse", + self.axes, + dist_theme="ellipse", color=GREEN, mean=np.array([0.4, -0.35]), - cov=np.array([[1, 0], [0, 1]]) + cov=np.array([[1, 0], [0, 1]]), ).scale(0.3), ] # Some assumptions - assert len(self.query_locations) == len(self.query_image_paths) + assert len(self.query_locations) == len(self.query_image_paths) assert len(self.query_locations) == len(self.posterior_distributions) def __iter__(self): @@ -105,16 +115,16 @@ def __next__(self): # Return query_paths, query_locations, posterior out_tuple = ( - self.query_image_paths[self.index], - self.query_locations[self.index], + self.query_image_paths[self.index], + self.query_locations[self.index], self.posterior_distributions[self.index], - self.query_covariances[self.index] + self.query_covariances[self.index], ) return out_tuple -class OracleGuidanceVisualization(Scene): +class OracleGuidanceVisualization(Scene): def __init__(self): super().__init__() self.neural_network, self.embedding_layer = self.make_vae() @@ -125,36 +135,37 @@ def __init__(self): # VAE embedding animation image paths self.assets_path = ROOT_DIR / "assets/oracle_guidance" self.input_embed_image_path = os.path.join(self.assets_path, "input_image.jpg") - self.output_embed_image_path = os.path.join(self.assets_path, "output_image.jpg") + self.output_embed_image_path = os.path.join( + self.assets_path, "output_image.jpg" + ) def make_vae(self): """Makes a simple VAE architecture""" embedding_layer = EmbeddingLayer(dist_theme="ellipse") - self.encoder = NeuralNetwork([ - FeedForwardLayer(5), - FeedForwardLayer(3), - embedding_layer, - ]) - - self.decoder = NeuralNetwork([ - FeedForwardLayer(3), - FeedForwardLayer(5), - ]) - - neural_network = NeuralNetwork([ - self.encoder, - self.decoder - ]) - - neural_network.shift(DOWN*0.4) + self.encoder = NeuralNetwork( + [ + FeedForwardLayer(5), + FeedForwardLayer(3), + embedding_layer, + ] + ) + + self.decoder = NeuralNetwork( + [ + FeedForwardLayer(3), + FeedForwardLayer(5), + ] + ) + + neural_network = NeuralNetwork([self.encoder, self.decoder]) + + neural_network.shift(DOWN * 0.4) return neural_network, embedding_layer @override_animation(Create) def _create_animation(self): - animation_group = AnimationGroup( - Create(self.neural_network) - ) - + animation_group = AnimationGroup(Create(self.neural_network)) + return animation_group def insert_at_start(self, layer, create=True): @@ 
-168,20 +179,14 @@ def insert_at_start(self, layer, create=True): self.encoder.all_layers.insert(1, connective_layer) # Move layers to the correct location # TODO: Fix this cause its hacky - layer.shift(DOWN*0.4) - layer.shift(LEFT*2.35) + layer.shift(DOWN * 0.4) + layer.shift(LEFT * 2.35) # Make insert animation if not create: - animation_group = AnimationGroup( - Create(connective_layer) - ) + animation_group = AnimationGroup(Create(connective_layer)) else: - animation_group = AnimationGroup( - Create(layer), - Create(connective_layer) - ) + animation_group = AnimationGroup(Create(layer), Create(connective_layer)) self.play(animation_group) - def remove_start_layer(self): """Removes the first layer of the network""" @@ -189,8 +194,7 @@ def remove_start_layer(self): first_connective = self.encoder.all_layers.remove_at_index(0) # Make remove animations animation_group = AnimationGroup( - FadeOut(first_layer), - FadeOut(first_connective) + FadeOut(first_layer), FadeOut(first_connective) ) self.play(animation_group) @@ -205,13 +209,10 @@ def insert_at_end(self, layer): self.decoder.all_layers.add(layer) # Move layers to the correct location # TODO: Fix this cause its hacky - layer.shift(DOWN*0.4) - layer.shift(RIGHT*2.35) + layer.shift(DOWN * 0.4) + layer.shift(RIGHT * 2.35) # Make insert animation - animation_group = AnimationGroup( - Create(layer), - Create(connective_layer) - ) + animation_group = AnimationGroup(Create(layer), Create(connective_layer)) self.play(animation_group) def remove_end_layer(self): @@ -220,8 +221,7 @@ def remove_end_layer(self): first_connective = self.decoder.all_layers.remove_at_index(-1) # Make remove animations animation_group = AnimationGroup( - FadeOut(first_layer), - FadeOut(first_connective) + FadeOut(first_layer), FadeOut(first_connective) ) self.play(animation_group) @@ -271,19 +271,18 @@ def make_embed_input_image_animation(self, input_image_path, output_image_path): self.encoder.all_layers.insert(1, connective_layer) # Move layers to the correct location # TODO: Fix this cause its hacky - input_image_layer.shift(DOWN*0.4) - input_image_layer.shift(LEFT*2.35) + input_image_layer.shift(DOWN * 0.4) + input_image_layer.shift(LEFT * 2.35) # Play full forward pass forward_pass = self.neural_network.make_forward_pass_animation( - layer_args= - { + layer_args={ self.encoder: { self.embedding_layer: { "dist_args": { "cov": np.array([[1.5, 0], [0, 1.5]]), "mean": np.array([0.5, 0.5]), "dist_theme": "ellipse", - "color": ORANGE + "color": ORANGE, } } } @@ -302,14 +301,14 @@ def make_embed_input_image_animation(self, input_image_path, output_image_path): def make_localization_time_step(self, old_posterior): """ - Performs one query update for the localization procedure - - Procedure: - a. Embed query input images - b. Oracle is asked a query - c. Query is embedded - d. Show posterior update - e. Show current recomendation + Performs one query update for the localization procedure + + Procedure: + a. Embed query input images + b. Oracle is asked a query + c. Query is embedded + d. Show posterior update + e. 
Show current recommendation """ # Helper functions def embed_query_to_latent_space(query_locations, query_covariance): @@ -331,41 +330,42 @@ def embed_query_to_latent_space(query_locations, query_covariance): "cov": query_covariance[0], "mean": query_locations[0], "dist_theme": "ellipse", - "color": BLUE + "color": BLUE, }, "negative_dist_args": { "cov": query_covariance[1], "mean": query_locations[1], "dist_theme": "ellipse", - "color": RED - } + "color": RED, + }, } - } + }, ) self.play(embed_query_animation) # Access localizer information - query_paths, query_locations, posterior_distribution, query_covariances = next(self.localizer) + query_paths, query_locations, posterior_distribution, query_covariances = next( + self.localizer + ) positive_path, negative_path = query_paths # Make subtitle for present user with query self.change_subtitle("2. Present User with Query") # Insert the layer into the encoder - query_layer = PairedQueryLayer.from_paths(positive_path, negative_path, grayscale=False) + query_layer = PairedQueryLayer.from_paths( + positive_path, negative_path, grayscale=False + ) query_layer.scale(0.5) self.insert_at_start(query_layer) # Embed query to latent space query_to_latent_space_animation = embed_query_to_latent_space( - query_locations, - query_covariances + query_locations, query_covariances ) # Wait self.play(Wait(1)) # Update the posterior self.change_subtitle("4. Update the Posterior") # Remove the old posterior - self.play( - ReplacementTransform(old_posterior, posterior_distribution) - ) + self.play(ReplacementTransform(old_posterior, posterior_distribution)) """ self.play( self.embedding_layer.remove_gaussian_distribution(self.localizer.posterior_distribution) ) @@ -376,12 +376,12 @@ def embed_query_to_latent_space(query_locations, query_covariance): # Remove query layer self.remove_start_layer() # Remove query ellipses - + fade_outs = [] for dist in self.embedding_layer.gaussian_distributions: self.embedding_layer.gaussian_distributions.remove(dist) fade_outs.append(FadeOut(dist)) - + if not len(fade_outs) == 0: fade_outs = AnimationGroup(*fade_outs) self.play(fade_outs) @@ -408,14 +408,21 @@ def make_generate_estimate_animation(self, estimate_image_path): # Remove the image at the end print(self.neural_network) self.remove_end_layer() - + def make_triplet_forward_animation(self): """Make triplet forward animation""" # Make triplet layer anchor_path = os.path.join(self.assets_path, "anchor.jpg") positive_path = os.path.join(self.assets_path, "positive.jpg") negative_path = os.path.join(self.assets_path, "negative.jpg") - triplet_layer = TripletLayer.from_paths(anchor_path, positive_path, negative_path, grayscale=False, font_size=100, buff=1.05) + triplet_layer = TripletLayer.from_paths( + anchor_path, + positive_path, + negative_path, + grayscale=False, + font_size=100, + buff=1.05, + ) triplet_layer.scale(0.10) self.insert_at_start(triplet_layer) # Make latent triplet animation @@ -428,35 +435,35 @@ def make_triplet_forward_animation(self): "cov": np.array([[0.3, 0], [0, 0.3]]), "mean": np.array([0.7, 1.4]), "dist_theme": "ellipse", - "color": BLUE + "color": BLUE, }, "positive_dist": { "cov": np.array([[0.2, 0], [0, 0.2]]), "mean": np.array([0.8, -0.4]), "dist_theme": "ellipse", - "color": GREEN + "color": GREEN, }, "negative_dist": { "cov": np.array([[0.4, 0], [0, 0.25]]), "mean": np.array([-1, -1.2]), "dist_theme": "ellipse", - "color": RED - } + "color": RED, + }, } } }, - run_time=3 + run_time=3, ) ) def construct(self): """ - Makes the whole visualization.
+ Makes the whole visualization. - 1. Create the Architecture - a. Create the traditional VAE architecture with images - 2. The Localization Procedure - 3. The Training Procedure + 1. Create the Architecture + a. Create the traditional VAE architecture with images + 2. The Localization Procedure + 3. The Training Procedure """ # 1. Create the Architecture self.neural_network.scale(1.2) @@ -482,7 +489,7 @@ def construct(self): self.play(Wait(1)) if not query_index == self.localizer.num_queries - 1: # Repeat - self.change_subtitle("5. Repeat") + self.change_subtitle("5. Repeat") # Wait a second self.play(Wait(1)) # Generate final estimate @@ -499,8 +506,7 @@ def construct(self): # Do an Image forward pass self.change_subtitle("1. Unsupervised Image Reconstruction") self.make_embed_input_image_animation( - self.input_embed_image_path, - self.output_embed_image_path + self.input_embed_image_path, self.output_embed_image_path ) self.wait(1) # Do triplet forward pass diff --git a/examples/variational_autoencoder/autoencoder_models/generate_disentanglement.py b/examples/variational_autoencoder/autoencoder_models/generate_disentanglement.py index c44a66e..84729ce 100644 --- a/examples/variational_autoencoder/autoencoder_models/generate_disentanglement.py +++ b/examples/variational_autoencoder/autoencoder_models/generate_disentanglement.py @@ -1,8 +1,13 @@ import pickle import sys import os + sys.path.append(os.environ["PROJECT_ROOT"]) -from autoencoder_models.variational_autoencoder import VAE, load_dataset, load_vae_from_path +from autoencoder_models.variational_autoencoder import ( + VAE, + load_dataset, + load_vae_from_path, +) import matplotlib.pyplot as plt import numpy as np import torch @@ -10,6 +15,7 @@ import scipy.stats import cv2 + def binned_images(model_path, num_x_bins=6, plot=False): latent_dim = 2 model = load_vae_from_path(model_path, latent_dim) @@ -30,8 +36,8 @@ def binned_images(model_path, num_x_bins=6, plot=False): embedding.append(mean) images = np.stack(images) tsne_points = np.array(embedding) - tsne_points = (tsne_points - tsne_points.mean(axis=0))/(tsne_points.std(axis=0)) - # make vis + tsne_points = (tsne_points - tsne_points.mean(axis=0)) / (tsne_points.std(axis=0)) + # make vis num_points = np.shape(tsne_points)[0] x_min = np.amin(tsne_points.T[0]) y_min = np.amin(tsne_points.T[1]) @@ -41,11 +47,17 @@ def binned_images(model_path, num_x_bins=6, plot=False): # to keep it square the same width is used for x and y dim x_bins, step = np.linspace(x_min, x_max, num_x_bins, retstep=True) x_bins = x_bins.astype(float) - num_y_bins = np.absolute(np.ceil((y_max - y_min)/step)).astype(int) + num_y_bins = np.absolute(np.ceil((y_max - y_min) / step)).astype(int) y_bins = np.linspace(y_min, y_max, num_y_bins) # sort the tsne_points into a 2d histogram tsne_points = tsne_points.squeeze() - hist_obj = scipy.stats.binned_statistic_dd(tsne_points, np.arange(num_points), statistic='count', bins=[x_bins, y_bins], expand_binnumbers=True) + hist_obj = scipy.stats.binned_statistic_dd( + tsne_points, + np.arange(num_points), + statistic="count", + bins=[x_bins, y_bins], + expand_binnumbers=True, + ) # sample one point from each bucket binnumbers = hist_obj.binnumber num_x_bins = np.amax(binnumbers[0]) + 1 @@ -53,30 +65,38 @@ def binned_images(model_path, num_x_bins=6, plot=False): binnumbers = binnumbers.T # some places have no value in a region used_mask = np.zeros((num_y_bins, num_x_bins)) - image_bins = np.zeros((num_y_bins, num_x_bins, 3, np.shape(images)[2], np.shape(images)[2])) + 
image_bins = np.zeros( + (num_y_bins, num_x_bins, 3, np.shape(images)[2], np.shape(images)[2]) + ) for i, bin_num in enumerate(list(binnumbers)): used_mask[bin_num[1], bin_num[0]] = 1 image_bins[bin_num[1], bin_num[0]] = images[i] # plot a grid of the images - fig, axs = plt.subplots(nrows=np.shape(y_bins)[0], ncols=np.shape(x_bins)[0], constrained_layout=False, dpi=50) + fig, axs = plt.subplots( + nrows=np.shape(y_bins)[0], + ncols=np.shape(x_bins)[0], + constrained_layout=False, + dpi=50, + ) images = [] bin_indices = [] for y in range(num_y_bins): for x in range(num_x_bins): if used_mask[y, x] > 0.0: - image = np.uint8(image_bins[y][x].squeeze()*255) + image = np.uint8(image_bins[y][x].squeeze() * 255) image = np.rollaxis(image, 0, 3) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) axs[num_y_bins - 1 - y][x].imshow(image) images.append(image) bin_indices.append((y, x)) - axs[y, x].axis('off') + axs[y, x].axis("off") if plot: - plt.axis('off') + plt.axis("off") plt.show() else: return images, bin_indices + def generate_disentanglement(model_path="saved_models/model_dim2.pth"): """Generates disentanglement visualization and serializes it""" # Disentanglement object @@ -89,11 +109,12 @@ def generate_disentanglement(model_path="saved_models/model_dim2.pth"): with open("disentanglement.pkl", "wb") as f: pickle.dump(disentanglement_object, f) + if __name__ == "__main__": plot = False if plot: model_path = "saved_models/model_dim2.pth" - #uniform_image_sample(model_path) + # uniform_image_sample(model_path) binned_images(model_path) else: - generate_disentanglement() \ No newline at end of file + generate_disentanglement() diff --git a/examples/variational_autoencoder/autoencoder_models/generate_images.py b/examples/variational_autoencoder/autoencoder_models/generate_images.py index 876eec7..4acfbe5 100644 --- a/examples/variational_autoencoder/autoencoder_models/generate_images.py +++ b/examples/variational_autoencoder/autoencoder_models/generate_images.py @@ -13,17 +13,16 @@ # Transforms images to a PyTorch Tensor tensor_transform = transforms.ToTensor() # Download the MNIST Dataset -dataset = datasets.MNIST(root = "./data", - train = True, - download = True, - transform = tensor_transform) +dataset = datasets.MNIST( + root="./data", train=True, download=True, transform=tensor_transform +) # Generate reconstructions num_recons = 10 fig, axs = plt.subplots(num_recons, 2, figsize=(2, num_recons)) image_pairs = [] for i in range(num_recons): base_image, _ = dataset[i] - base_image = base_image.reshape(-1, 28*28) + base_image = base_image.reshape(-1, 28 * 28) _, _, recon_image, _ = vae.forward(base_image) base_image = base_image.detach().numpy() base_image = np.reshape(base_image, (28, 28)) * 255 diff --git a/examples/variational_autoencoder/autoencoder_models/generate_interpolation.py b/examples/variational_autoencoder/autoencoder_models/generate_interpolation.py index 936fa75..ec3dcee 100644 --- a/examples/variational_autoencoder/autoencoder_models/generate_interpolation.py +++ b/examples/variational_autoencoder/autoencoder_models/generate_interpolation.py @@ -14,12 +14,12 @@ # Generate reconstructions num_images = 50 image_pairs = [] -save_object = {"interpolation_path":[], "interpolation_images":[]} +save_object = {"interpolation_path": [], "interpolation_images": []} # Make interpolation path image_a, image_b = dataset[0][0], dataset[1][0] -image_a = image_a.view(32*32) -image_b = image_b.view(32*32) +image_a = image_a.view(32 * 32) +image_b = image_b.view(32 * 32) z_a, _, _, _ = 
vae.forward(image_a) z_a = z_a.detach().cpu().numpy() z_b, _, _, _ = vae.forward(image_b) @@ -29,7 +29,7 @@ save_object["interpolation_path"] = interpolation_path for i in range(num_images): - # Generate + # Generate z = torch.Tensor(interpolation_path[i]).unsqueeze(0) gen_image = vae.decode(z).detach().numpy() gen_image = np.reshape(gen_image, (32, 32)) * 255 @@ -46,4 +46,4 @@ with open("interpolations.pkl", "wb") as f: pickle.dump(save_object, f) -plt.show() \ No newline at end of file +plt.show() diff --git a/examples/variational_autoencoder/autoencoder_models/variational_autoencoder.py b/examples/variational_autoencoder/autoencoder_models/variational_autoencoder.py index a3363c7..14c186c 100644 --- a/examples/variational_autoencoder/autoencoder_models/variational_autoencoder.py +++ b/examples/variational_autoencoder/autoencoder_models/variational_autoencoder.py @@ -12,59 +12,137 @@ sizes of convolutional neural networks """ + def num2tuple(num): return num if isinstance(num, tuple) else (num, num) + def conv2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1): - h_w, kernel_size, stride, pad, dilation = num2tuple(h_w), \ - num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation) + h_w, kernel_size, stride, pad, dilation = ( + num2tuple(h_w), + num2tuple(kernel_size), + num2tuple(stride), + num2tuple(pad), + num2tuple(dilation), + ) pad = num2tuple(pad[0]), num2tuple(pad[1]) - - h = math.floor((h_w[0] + sum(pad[0]) - dilation[0]*(kernel_size[0]-1) - 1) / stride[0] + 1) - w = math.floor((h_w[1] + sum(pad[1]) - dilation[1]*(kernel_size[1]-1) - 1) / stride[1] + 1) - + + h = math.floor( + (h_w[0] + sum(pad[0]) - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1 + ) + w = math.floor( + (h_w[1] + sum(pad[1]) - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1 + ) + return h, w -def convtransp2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0): - h_w, kernel_size, stride, pad, dilation, out_pad = num2tuple(h_w), \ - num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation), num2tuple(out_pad) + +def convtransp2d_output_shape( + h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0 +): + h_w, kernel_size, stride, pad, dilation, out_pad = ( + num2tuple(h_w), + num2tuple(kernel_size), + num2tuple(stride), + num2tuple(pad), + num2tuple(dilation), + num2tuple(out_pad), + ) pad = num2tuple(pad[0]), num2tuple(pad[1]) - - h = (h_w[0] - 1)*stride[0] - sum(pad[0]) + dialation[0]*(kernel_size[0]-1) + out_pad[0] + 1 - w = (h_w[1] - 1)*stride[1] - sum(pad[1]) + dialation[1]*(kernel_size[1]-1) + out_pad[1] + 1 - + + h = ( + (h_w[0] - 1) * stride[0] + - sum(pad[0]) + + dilation[0] * (kernel_size[0] - 1) + + out_pad[0] + + 1 + ) + w = ( + (h_w[1] - 1) * stride[1] + - sum(pad[1]) + + dilation[1] * (kernel_size[1] - 1) + + out_pad[1] + + 1 + ) + return h, w + def conv2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1): - h_w_in, h_w_out, kernel_size, stride, dilation = num2tuple(h_w_in), num2tuple(h_w_out), \ - num2tuple(kernel_size), num2tuple(stride), num2tuple(dilation) - - p_h = ((h_w_out[0] - 1)*stride[0] - h_w_in[0] + dilation[0]*(kernel_size[0]-1) + 1) - p_w = ((h_w_out[1] - 1)*stride[1] - h_w_in[1] + dilation[1]*(kernel_size[1]-1) + 1) - - return (math.floor(p_h/2), math.ceil(p_h/2)), (math.floor(p_w/2), math.ceil(p_w/2)) - -def convtransp2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1, out_pad=0): - h_w_in, h_w_out, kernel_size, stride, dilation, out_pad = 
num2tuple(h_w_in), num2tuple(h_w_out), \ - num2tuple(kernel_size), num2tuple(stride), num2tuple(dilation), num2tuple(out_pad) - - p_h = -(h_w_out[0] - 1 - out_pad[0] - dilation[0]*(kernel_size[0]-1) - (h_w_in[0] - 1)*stride[0]) / 2 - p_w = -(h_w_out[1] - 1 - out_pad[1] - dilation[1]*(kernel_size[1]-1) - (h_w_in[1] - 1)*stride[1]) / 2 - - return (math.floor(p_h/2), math.ceil(p_h/2)), (math.floor(p_w/2), math.ceil(p_w/2)) + h_w_in, h_w_out, kernel_size, stride, dilation = ( + num2tuple(h_w_in), + num2tuple(h_w_out), + num2tuple(kernel_size), + num2tuple(stride), + num2tuple(dilation), + ) + + p_h = ( + (h_w_out[0] - 1) * stride[0] + - h_w_in[0] + + dilation[0] * (kernel_size[0] - 1) + + 1 + ) + p_w = ( + (h_w_out[1] - 1) * stride[1] + - h_w_in[1] + + dilation[1] * (kernel_size[1] - 1) + + 1 + ) + + return (math.floor(p_h / 2), math.ceil(p_h / 2)), ( + math.floor(p_w / 2), + math.ceil(p_w / 2), + ) + + +def convtransp2d_get_padding( + h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1, out_pad=0 +): + h_w_in, h_w_out, kernel_size, stride, dilation, out_pad = ( + num2tuple(h_w_in), + num2tuple(h_w_out), + num2tuple(kernel_size), + num2tuple(stride), + num2tuple(dilation), + num2tuple(out_pad), + ) + + p_h = ( + -( + h_w_out[0] + - 1 + - out_pad[0] + - dilation[0] * (kernel_size[0] - 1) + - (h_w_in[0] - 1) * stride[0] + ) + / 2 + ) + p_w = ( + -( + h_w_out[1] + - 1 + - out_pad[1] + - dilation[1] * (kernel_size[1] - 1) + - (h_w_in[1] - 1) * stride[1] + ) + / 2 + ) + + return (math.floor(p_h / 2), math.ceil(p_h / 2)), ( + math.floor(p_w / 2), + math.ceil(p_w / 2), + ) + def load_dataset(train=True, digit=None): # Transforms images to a PyTorch Tensor - tensor_transform = transforms.Compose([ - transforms.Pad(2), - transforms.ToTensor() - ]) - + tensor_transform = transforms.Compose([transforms.Pad(2), transforms.ToTensor()]) + # Download the MNIST Dataset - dataset = datasets.MNIST(root = "./data", - train = train, - download = True, - transform = tensor_transform) + dataset = datasets.MNIST( + root="./data", train=train, download=True, transform=tensor_transform + ) # Load specific image if not digit is None: idx = dataset.train_labels == digit @@ -73,12 +151,14 @@ def load_dataset(train=True, digit=None): return dataset + def load_vae_from_path(path, latent_dim): model = VAE(latent_dim) model.load_state_dict(torch.load(path)) - + return model + # Creating a PyTorch class # 28*28 ==> 9 ==> 28*28 class VAE(torch.nn.Module): @@ -96,13 +176,15 @@ def __init__(self, latent_dim=5, layer_count=4, channels=1): setattr(self, "conv%d" % (i + 1), nn.Conv2d(inputs, self.d * mul, 4, 2, 1)) setattr(self, "conv%d_bn" % (i + 1), nn.BatchNorm2d(self.d * mul)) h_w = (out_sizes[-1][-1], out_sizes[-1][-1]) - out_sizes.append(conv2d_output_shape(h_w, kernel_size=4, stride=2, pad=1, dilation=1)) + out_sizes.append( + conv2d_output_shape(h_w, kernel_size=4, stride=2, pad=1, dilation=1) + ) inputs = self.d * mul mul *= 2 self.d_max = inputs self.last_size = out_sizes[-1][-1] - self.num_linear = self.last_size ** 2 * self.d_max + self.num_linear = self.last_size**2 * self.d_max # Encoder linear layers self.encoder_mean_linear = nn.Linear(self.num_linear, self.latent_dim) self.encoder_logvar_linear = nn.Linear(self.num_linear, self.latent_dim) @@ -112,12 +194,20 @@ def __init__(self, latent_dim=5, layer_count=4, channels=1): mul = inputs // self.d // 2 for i in range(1, self.layer_count): - setattr(self, "deconv%d" % (i + 1), nn.ConvTranspose2d(inputs, self.d * mul, 4, 2, 1)) + setattr( + self, + "deconv%d" % (i + 1), 
+ nn.ConvTranspose2d(inputs, self.d * mul, 4, 2, 1), + ) setattr(self, "deconv%d_bn" % (i + 1), nn.BatchNorm2d(self.d * mul)) inputs = self.d * mul mul //= 2 - setattr(self, "deconv%d" % (self.layer_count + 1), nn.ConvTranspose2d(inputs, self.channels, 4, 2, 1)) + setattr( + self, + "deconv%d" % (self.layer_count + 1), + nn.ConvTranspose2d(inputs, self.channels, 4, 2, 1), + ) def encode(self, x): if len(x.shape) < 3: @@ -127,7 +217,11 @@ def encode(self, x): batch_size = x.shape[0] for i in range(self.layer_count): - x = F.relu(getattr(self, "conv%d_bn" % (i + 1))(getattr(self, "conv%d" % (i + 1))(x))) + x = F.relu( + getattr(self, "conv%d_bn" % (i + 1))( + getattr(self, "conv%d" % (i + 1))(x) + ) + ) x = x.view(batch_size, -1) @@ -140,15 +234,20 @@ def decode(self, x): x = x.view(x.shape[0], self.latent_dim) x = self.decoder_linear(x) x = x.view(x.shape[0], self.d_max, self.last_size, self.last_size) - #x = self.deconv1_bn(x) + # x = self.deconv1_bn(x) x = F.leaky_relu(x, 0.2) for i in range(1, self.layer_count): - x = F.leaky_relu(getattr(self, "deconv%d_bn" % (i + 1))(getattr(self, "deconv%d" % (i + 1))(x)), 0.2) + x = F.leaky_relu( + getattr(self, "deconv%d_bn" % (i + 1))( + getattr(self, "deconv%d" % (i + 1))(x) + ), + 0.2, + ) x = getattr(self, "deconv%d" % (self.layer_count + 1))(x) x = torch.sigmoid(x) return x - + def forward(self, x): batch_size = x.shape[0] mean, logvar = self.encode(x) @@ -157,26 +256,25 @@ def forward(self, x): reconstructed = self.decode(z) return mean, logvar, reconstructed, x + def train_model(latent_dim=16, plot=True, digit=1, epochs=200): dataset = load_dataset(train=True, digit=digit) - # DataLoader is used to load the dataset + # DataLoader is used to load the dataset # for training - loader = torch.utils.data.DataLoader(dataset = dataset, - batch_size = 32, - shuffle = True) + loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=32, shuffle=True) # Model Initialization model = VAE(latent_dim=latent_dim) # Validation using MSE Loss function def loss_function(mean, log_var, reconstructed, original, kl_beta=0.0001): - kl = torch.mean(-0.5 * torch.sum(1 + log_var - mean ** 2 - log_var.exp(), dim = 1), dim = 0) + kl = torch.mean( + -0.5 * torch.sum(1 + log_var - mean**2 - log_var.exp(), dim=1), dim=0 + ) recon = torch.nn.functional.mse_loss(reconstructed, original) # print(f"KL Error {kl}, Recon Error {recon}") return kl_beta * kl + recon # Using an Adam Optimizer with lr = 0.1 - optimizer = torch.optim.Adam(model.parameters(), - lr = 1e-4, - weight_decay = 0e-8) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0e-8) outputs = [] losses = [] @@ -198,22 +296,24 @@ def loss_function(mean, log_var, reconstructed, original, kl_beta=0.0001): losses.append(loss.detach().cpu()) outputs.append((epochs, image, reconstructed)) - torch.save(model.state_dict(), + torch.save( + model.state_dict(), os.path.join( - os.environ["PROJECT_ROOT"], - f"examples/variational_autoencoder/autoencoder_model/saved_models/model_dim{latent_dim}.pth" - ) + os.environ["PROJECT_ROOT"], + f"examples/variational_autoencoder/autoencoder_model/saved_models/model_dim{latent_dim}.pth", + ), ) if plot: # Defining the Plot Style - plt.style.use('fivethirtyeight') - plt.xlabel('Iterations') - plt.ylabel('Loss') - + plt.style.use("fivethirtyeight") + plt.xlabel("Iterations") + plt.ylabel("Loss") + # Plotting the last 100 values plt.plot(losses) plt.show() + if __name__ == "__main__": train_model(latent_dim=2, digit=2, epochs=40) diff --git 
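
The objective above is the standard beta-weighted VAE loss: a closed-form KL term between the encoder's Gaussian q(z|x) and N(0, I), plus an MSE reconstruction term, with the KL scaled down by kl_beta=0.0001. The sampling step between encode and decode is elided in this hunk; a minimal sketch of the reparameterization trick it presumably performs (tensor shapes are illustrative):

    import torch

    mean = torch.zeros(8, 2)     # batch of 8, latent_dim=2 as in train_model
    log_var = torch.zeros(8, 2)
    # z = mu + sigma * eps keeps the sample differentiable w.r.t. mu, sigma
    z = mean + torch.exp(0.5 * log_var) * torch.randn_like(mean)
    # Sanity check on the KL term: at mean=0, log_var=0 it evaluates to
    # -0.5 * sum(1 + 0 - 0 - 1) = 0, as expected when q already equals N(0, I).
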
a/examples/variational_autoencoder/variational_autoencoder.py b/examples/variational_autoencoder/variational_autoencoder.py index 87fd359..e661220 100644 --- a/examples/variational_autoencoder/variational_autoencoder.py +++ b/examples/variational_autoencoder/variational_autoencoder.py @@ -21,22 +21,23 @@ config.frame_height = 7.0 config.frame_width = 7.0 + class VAEScene(Scene): """Scene object for a Variational Autoencoder and Autoencoder""" def construct(self): - - numpy_image = np.asarray(Image.open(ROOT_DIR / 'assets/mnist/digit.jpeg')) - vae = NeuralNetwork([ - ImageLayer(numpy_image, height=1.4), - FeedForwardLayer(5), - FeedForwardLayer(3), - EmbeddingLayer(dist_theme="ellipse"), - FeedForwardLayer(3), - FeedForwardLayer(5), - ImageLayer(numpy_image, height=1.4), - ]) - + numpy_image = np.asarray(Image.open(ROOT_DIR / "assets/mnist/digit.jpeg")) + vae = NeuralNetwork( + [ + ImageLayer(numpy_image, height=1.4), + FeedForwardLayer(5), + FeedForwardLayer(3), + EmbeddingLayer(dist_theme="ellipse"), + FeedForwardLayer(3), + FeedForwardLayer(5), + ImageLayer(numpy_image, height=1.4), + ] + ) self.play(Create(vae)) - self.play(vae.make_forward_pass_animation(run_time=15)) \ No newline at end of file + self.play(vae.make_forward_pass_animation(run_time=15)) diff --git a/manim_ml/decision_tree/decision_tree.py b/manim_ml/decision_tree/decision_tree.py index 689a9e2..73304a9 100644 --- a/manim_ml/decision_tree/decision_tree.py +++ b/manim_ml/decision_tree/decision_tree.py @@ -5,20 +5,27 @@ from manim import * from manim_ml.one_to_one_sync import OneToOneSync + class LeafNode(VGroup): pass + class NonLeafNode(VGroup): pass + class DecisionTreeDiagram(Graph): """Decision Tree Digram Class for Manim""" + pass -class DecisionTreeEmbedding(): + +class DecisionTreeEmbedding: """Embedding for the decision tree""" + pass + class DecisionTreeContainer(OneToOneSync): """Connects the DecisionTreeDiagram to the DecisionTreeEmbedding""" diff --git a/manim_ml/flow/flow.py b/manim_ml/flow/flow.py index d8608ac..983cc4a 100644 --- a/manim_ml/flow/flow.py +++ b/manim_ml/flow/flow.py @@ -3,15 +3,20 @@ """ from manim import * + class FlowGraph(VGroup): """Graph container""" + pass + class FlowNode(VGroup): """Node in the FlowGraph""" + pass + class DataNode(FlowNode): """Node that outputs data""" - pass + pass diff --git a/manim_ml/gridded_rectangle.py b/manim_ml/gridded_rectangle.py index 26f1eda..f3e908c 100644 --- a/manim_ml/gridded_rectangle.py +++ b/manim_ml/gridded_rectangle.py @@ -1,14 +1,27 @@ from manim import * import numpy as np + class GriddedRectangle(VGroup): """Rectangle object with grid lines""" - def __init__(self, color=ORANGE, height=2.0, width=4.0, - mark_paths_closed=True, close_new_points=True, - grid_xstep=None, grid_ystep=None, grid_stroke_width=0.0, #DEFAULT_STROKE_WIDTH/2, - grid_stroke_color=ORANGE, grid_stroke_opacity=1.0, - stroke_width=2.0, fill_opacity=0.2, show_grid_lines=False, **kwargs): + def __init__( + self, + color=ORANGE, + height=2.0, + width=4.0, + mark_paths_closed=True, + close_new_points=True, + grid_xstep=None, + grid_ystep=None, + grid_stroke_width=0.0, # DEFAULT_STROKE_WIDTH/2, + grid_stroke_color=ORANGE, + grid_stroke_opacity=1.0, + stroke_width=2.0, + fill_opacity=0.2, + show_grid_lines=False, + **kwargs + ): super().__init__() # Fields self.mark_paths_closed = mark_paths_closed @@ -25,8 +38,8 @@ def __init__(self, color=ORANGE, height=2.0, width=4.0, self.show_grid_lines = show_grid_lines # Make rectangle self.rectangle = Rectangle( - width=width, - 
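# The backing Rectangle carries the fill and stroke styling only; grid
# lines are generated separately in make_grid_lines() and are drawn
# only when show_grid_lines is set.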
height=height, + width=width, + height=height, color=color, stroke_width=stroke_width, fill_color=color, @@ -44,29 +57,21 @@ def make_corners_dict(self): """Make corners dictionary""" corners_dict = { "top_right": Dot( - self.rectangle.get_corner([1, 1, 0]), - fill_opacity=0.0, - radius=0.0 + self.rectangle.get_corner([1, 1, 0]), fill_opacity=0.0, radius=0.0 ), "top_left": Dot( - self.rectangle.get_corner([-1, 1, 0]), - fill_opacity=0.0, - radius=0.0 + self.rectangle.get_corner([-1, 1, 0]), fill_opacity=0.0, radius=0.0 ), "bottom_left": Dot( - self.rectangle.get_corner([-1, -1, 0]), - fill_opacity=0.0, - radius=0.0 + self.rectangle.get_corner([-1, -1, 0]), fill_opacity=0.0, radius=0.0 ), "bottom_right": Dot( - self.rectangle.get_corner([1, -1, 0]), - fill_opacity=0.0, - radius=0.0 + self.rectangle.get_corner([1, -1, 0]), fill_opacity=0.0, radius=0.0 ), } return corners_dict - + def get_corners_dict(self): """Returns a dictionary of the corners""" # Sort points through clockwise rotation of a vector in the xy plane @@ -87,13 +92,13 @@ def make_grid_lines(self): v[1] + i * grid_xstep * RIGHT + self.height * DOWN, stroke_color=self.grid_stroke_color, stroke_width=self.grid_stroke_width, - stroke_opacity = self.grid_stroke_opacity + stroke_opacity=self.grid_stroke_opacity, ) for i in range(1, count) ) ) grid_lines.add(grid) - + if self.grid_ystep is not None: grid_ystep = abs(self.grid_ystep) count = int(self.height / grid_ystep) @@ -103,12 +108,12 @@ def make_grid_lines(self): v[1] + i * grid_ystep * DOWN, v[1] + i * grid_ystep * DOWN + self.width * RIGHT, stroke_color=self.grid_stroke_color, - stroke_width = self.grid_stroke_width, - stroke_opacity = self.grid_stroke_opacity + stroke_width=self.grid_stroke_width, + stroke_opacity=self.grid_stroke_opacity, ) for i in range(1, count) ) - ) + ) grid_lines.add(grid) return grid_lines diff --git a/manim_ml/image.py b/manim_ml/image.py index 9d3acbf..bb1ec64 100644 --- a/manim_ml/image.py +++ b/manim_ml/image.py @@ -2,13 +2,14 @@ import numpy as np from PIL import Image + class GrayscaleImageMobject(ImageMobject): """Mobject for creating images in Manim from numpy arrays""" def __init__(self, numpy_image, height=2.3): self.numpy_image = numpy_image - assert len(np.shape(self.numpy_image)) == 2 + assert len(np.shape(self.numpy_image)) == 2 input_image = self.numpy_image[None, :, :] # Convert grayscale to rgb version of grayscale input_image = np.repeat(input_image, 3, axis=0) @@ -31,11 +32,13 @@ def from_path(cls, path, height=2.3): def create(self, run_time=2): return FadeIn(self) + class LabeledColorImage(Group): """Labeled Color Image""" - def __init__(self, image, color=RED, label="Positive", stroke_width=5, - font_size=24, buff=0.2): + def __init__( + self, image, color=RED, label="Positive", stroke_width=5, font_size=24, buff=0.2 + ): super().__init__() self.image = image self.color = color @@ -46,12 +49,9 @@ def __init__(self, image, color=RED, label="Positive", stroke_width=5, text = Text(label, font_size=self.font_size) text.next_to(self.image, UP, buff=buff) rectangle = SurroundingRectangle( - self.image, - color=color, - buff=0.0, - stroke_width=self.stroke_width + self.image, color=color, buff=0.0, stroke_width=self.stroke_width ) self.add(text) self.add(rectangle) - self.add(self.image) \ No newline at end of file + self.add(self.image) diff --git a/manim_ml/lazy_animation.py b/manim_ml/lazy_animation.py index 4bb4f99..ba19711 100644 --- a/manim_ml/lazy_animation.py +++ b/manim_ml/lazy_animation.py @@ -1,7 +1,7 @@ from manim import * 
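
The LazyAnimation class below is broken as committed: super is invoked without parentheses (super.__init__() and super.begin() raise AttributeError at runtime), self.add is referenced without being called, and begin() uses names (self.neural_network, create_new_connective) that this module never defines. A minimal sketch of the apparent intent with those problems fixed; the exact wiring to the underlying mobject is not shown in this file, so the superclass arguments here are assumptions:

    from manim import Animation

    class LazyAnimation(Animation):
        """Sketch: defer building the wrapped animation until begin()."""

        def __init__(self, animation_function, **kwargs):
            self.animation_function = animation_function
            # super(), not super: the bare name in the original fails
            # at construction time.
            super().__init__(None, **kwargs)

        def begin(self):
            # Build lazily; the mobject only exists once the surrounding
            # network has been assembled (assumed usage, not shown here).
            animation = self.animation_function()
            self.mobject = animation.mobject
            super().begin()
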
-class LazyAnimation(Animation): +class LazyAnimation(Animation): def __init__(self, animation_function): self.animation_function = animation_function super.__init__() @@ -10,4 +10,4 @@ def begin(self): update_func_anim = UpdateFromFunc(self.neural_network, create_new_connective) self.add - super.begin() \ No newline at end of file + super.begin() diff --git a/manim_ml/list_group.py b/manim_ml/list_group.py index 18e932d..1178264 100644 --- a/manim_ml/list_group.py +++ b/manim_ml/list_group.py @@ -1,4 +1,5 @@ -from manim import * +from manim import * + class ListGroup(Mobject): """Indexable Group with traditional list operations""" @@ -10,7 +11,7 @@ def __init__(self, *layers): def __getitem__(self, indices): """Traditional list indexing""" return self.items[indices] - + def insert(self, index, item): """Inserts item at index""" self.items.insert(index, item) @@ -39,7 +40,7 @@ def remove(self, item): """Removes first instance of item""" self.items.remove(item) self.submobjects = self.items - + return item def get(self, index): @@ -76,7 +77,7 @@ def __iter__(self): self.current_index = -1 return self - def __next__(self): # Python 2: def next(self) + def __next__(self): # Python 2: def next(self) self.current_index += 1 if self.current_index < len(self.items): return self.items[self.current_index] diff --git a/manim_ml/manifold.py b/manim_ml/manifold.py index 67c1d9e..72c68ff 100644 --- a/manim_ml/manifold.py +++ b/manim_ml/manifold.py @@ -1,3 +1,3 @@ """ Visaulization of a latent Manifold -""" \ No newline at end of file +""" diff --git a/manim_ml/neural_network/layers/__init__.py b/manim_ml/neural_network/layers/__init__.py index cfd683f..28f9891 100644 --- a/manim_ml/neural_network/layers/__init__.py +++ b/manim_ml/neural_network/layers/__init__.py @@ -1,5 +1,9 @@ -from manim_ml.neural_network.layers.convolutional_3d_to_feed_forward import Convolutional3DToFeedForward -from manim_ml.neural_network.layers.image_to_convolutional3d import ImageToConvolutional3DLayer +from manim_ml.neural_network.layers.convolutional_3d_to_feed_forward import ( + Convolutional3DToFeedForward, +) +from manim_ml.neural_network.layers.image_to_convolutional3d import ( + ImageToConvolutional3DLayer, +) from .convolutional3d_to_convolutional3d import Convolutional3DToConvolutional3D from .convolutional2d_to_convolutional2d import Convolutional2DToConvolutional2D from .convolutional3d import Convolutional3DLayer @@ -33,5 +37,5 @@ Convolutional3DToConvolutional3D, Convolutional2DToConvolutional2D, ImageToConvolutional3DLayer, - Convolutional3DToFeedForward + Convolutional3DToFeedForward, ) diff --git a/manim_ml/neural_network/layers/convolutional2d.py b/manim_ml/neural_network/layers/convolutional2d.py index 24ed9d6..9ecfbe0 100644 --- a/manim_ml/neural_network/layers/convolutional2d.py +++ b/manim_ml/neural_network/layers/convolutional2d.py @@ -2,11 +2,20 @@ from matplotlib import animation from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer + class Convolutional2DLayer(VGroupNeuralNetworkLayer): - - def __init__(self, feature_map_height, feature_map_width, filter_width, filter_height, - stride=1, cell_width=0.5, feature_map_color=BLUE, filter_color=ORANGE, - **kwargs): + def __init__( + self, + feature_map_height, + feature_map_width, + filter_width, + filter_height, + stride=1, + cell_width=0.5, + feature_map_color=BLUE, + filter_color=ORANGE, + **kwargs + ): super(VGroupNeuralNetworkLayer, self).__init__(**kwargs) self.feature_map_height = feature_map_height self.feature_map_width 
= feature_map_width @@ -24,10 +33,10 @@ def construct_feature_map(self): # Make feature map rectangle self.feature_map = Rectangle( width=self.feature_map_width * self.cell_width, - height=self.feature_map_height * self.cell_width, + height=self.feature_map_height * self.cell_width, color=self.feature_map_color, grid_xstep=self.cell_width, - grid_ystep=self.cell_width + grid_ystep=self.cell_width, ) self.add(self.feature_map) @@ -38,4 +47,4 @@ def _create_override(self, **kwargs): def make_forward_pass_animation(self, **kwargs): """Make feed forward animation""" - return AnimationGroup() \ No newline at end of file + return AnimationGroup() diff --git a/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py b/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py index 6798027..1666d08 100644 --- a/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py +++ b/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py @@ -3,15 +3,30 @@ from manim_ml.neural_network.layers.convolutional2d import Convolutional2DLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer + class Convolutional2DToConvolutional2D(ConnectiveLayer): """2D Conv to 2d Conv""" + input_class = Convolutional2DLayer output_class = Convolutional2DLayer - def __init__(self, input_layer, output_layer, color=WHITE, - filter_opacity=0.3, line_color=WHITE, pulse_color=ORANGE, **kwargs): - super().__init__(input_layer, output_layer, input_class=Convolutional2DLayer, - output_class=Convolutional2DLayer, **kwargs) + def __init__( + self, + input_layer, + output_layer, + color=WHITE, + filter_opacity=0.3, + line_color=WHITE, + pulse_color=ORANGE, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=Convolutional2DLayer, + output_class=Convolutional2DLayer, + **kwargs + ) self.color = color self.filter_color = self.input_layer.filter_color self.filter_width = self.input_layer.filter_width @@ -27,7 +42,7 @@ def __init__(self, input_layer, output_layer, color=WHITE, @override_animation(Create) def _create_override(self, **kwargs): return AnimationGroup() - + def make_filter(self): """Make filter object""" # Make opaque rectangle @@ -38,10 +53,12 @@ def make_filter(self): height=self.cell_width * self.filter_height, grid_xstep=self.cell_width, grid_ystep=self.cell_width, - fill_opacity=self.filter_opacity + fill_opacity=self.filter_opacity, ) # Move filter to top left of feature map - filter.move_to(self.input_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP) + filter.move_to( + self.input_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP + ) return filter @@ -53,10 +70,12 @@ def make_output_node(self): fill_color=self.filter_color, width=self.cell_width, height=self.cell_width, - fill_opacity=self.filter_opacity + fill_opacity=self.filter_opacity, ) # Move filter to top left of feature map - filter.move_to(self.output_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP) + filter.move_to( + self.output_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP + ) return filter @@ -67,7 +86,7 @@ def make_filter_propagation_animation(self): Create(lines_copy, lag_ratio=0.0), # FadeOut(self.filter_lines), FadeOut(lines_copy), - lag_ratio=1.0 + lag_ratio=1.0, ) return animation_group @@ -90,7 +109,7 @@ def filter_updater(filter_lines): line = filter_lines[corner_index] filter_corner = self.filter.get_corner(corner_direction) output_corner = self.output_node.get_corner(corner_direction) - 
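# The updater re-anchors each connective line as the filter and output
# node move: it reads the current corner positions and redraws the line
# between them with set_points_by_ends.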
#line._set_start_and_end_attrs(filter_corner, output_corner) + # line._set_start_and_end_attrs(filter_corner, output_corner) # line.put_start_and_end_on(filter_corner, output_corner) line.set_points_by_ends(filter_corner, output_corner) # line._set_start_and_end_attrs(filter_corner, output_corner) @@ -111,7 +130,7 @@ def make_assets(self): # Make filter lines self.filter_lines = self.make_filter_lines() self.add(self.filter_lines) - + super().set_z_index(5) def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): @@ -127,31 +146,39 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): AnimationGroup( Create(self.filter), Create(self.output_node), - # Create(self.filter_lines) + # Create(self.filter_lines) ) ) # Make scan filter animation - num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride) + 1 + num_y_moves = ( + int((self.feature_map_height - self.filter_height) / self.stride) + 1 + ) num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride) for y_location in range(num_y_moves): if y_location > 0: # Shift filter back to start and down shift_animation = ApplyMethod( self.filter.shift, - np.array([ - -self.cell_width * (self.feature_map_width - self.filter_width), - -self.stride * self.cell_width, - 0 - ]) + np.array( + [ + -self.cell_width + * (self.feature_map_width - self.filter_width), + -self.stride * self.cell_width, + 0, + ] + ), ) # Shift output node shift_output_node = ApplyMethod( self.output_node.shift, - np.array([ - -(self.output_layer.feature_map_width - 1) * self.cell_width, - -self.cell_width, - 0 - ]) + np.array( + [ + -(self.output_layer.feature_map_width - 1) + * self.cell_width, + -self.cell_width, + 0, + ] + ), ) # Make animation group animation_group = AnimationGroup( @@ -167,13 +194,11 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): for x_location in range(num_x_moves): # Shift filter right shift_animation = ApplyMethod( - self.filter.shift, - np.array([self.stride * self.cell_width, 0, 0]) + self.filter.shift, np.array([self.stride * self.cell_width, 0, 0]) ) # Shift output node shift_output_node = ApplyMethod( - self.output_node.shift, - np.array([self.cell_width, 0, 0]) + self.output_node.shift, np.array([self.cell_width, 0, 0]) ) # Make animation group animation_group = AnimationGroup( @@ -183,7 +208,11 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): animations.append(animation_group) # Make filter passing flash old_z_index = self.filter_lines.z_index - lines_copy = self.filter_lines.copy().set_color(ORANGE).set_z_index(old_z_index + 1) + lines_copy = ( + self.filter_lines.copy() + .set_color(ORANGE) + .set_z_index(old_z_index + 1) + ) # self.add(lines_copy) # self.lines_copies.add(lines_copy) animations.append(Create(self.filter_lines, lag_ratio=0.0)) @@ -197,14 +226,11 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): AnimationGroup( FadeOut(self.filter), FadeOut(self.output_node), - FadeOut(self.filter_lines) + FadeOut(self.filter_lines), ) ) # Make animation group - animation_group = Succession( - *animations, - lag_ratio=1.0 - ) + animation_group = Succession(*animations, lag_ratio=1.0) return animation_group def set_z_index(self, z_index, family=False): @@ -213,4 +239,4 @@ def set_z_index(self, z_index, family=False): def scale(self, scale_factor, **kwargs): self.cell_width *= scale_factor - super().scale(scale_factor, **kwargs) \ No newline at end of file + 
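# cell_width is plain Python state rather than mobject geometry, so the
# scale() override must rescale it by hand before delegating to super().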
super().scale(scale_factor, **kwargs) diff --git a/manim_ml/neural_network/layers/convolutional3d.py b/manim_ml/neural_network/layers/convolutional3d.py index 74a0432..d01a232 100644 --- a/manim_ml/neural_network/layers/convolutional3d.py +++ b/manim_ml/neural_network/layers/convolutional3d.py @@ -1,14 +1,32 @@ from manim import * -from manim_ml.neural_network.layers.parent_layers import ThreeDLayer, VGroupNeuralNetworkLayer +from manim_ml.neural_network.layers.parent_layers import ( + ThreeDLayer, + VGroupNeuralNetworkLayer, +) from manim_ml.gridded_rectangle import GriddedRectangle import numpy as np + class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): """Handles rendering a convolutional layer for a nn""" - def __init__(self, num_feature_maps, feature_map_width, feature_map_height, - filter_width, filter_height, cell_width=0.2, filter_spacing=0.1, color=BLUE, - pulse_color=ORANGE, show_grid_lines=False, filter_color=ORANGE, stride=1, stroke_width=2.0, **kwargs): + def __init__( + self, + num_feature_maps, + feature_map_width, + feature_map_height, + filter_width, + filter_height, + cell_width=0.2, + filter_spacing=0.1, + color=BLUE, + pulse_color=ORANGE, + show_grid_lines=False, + filter_color=ORANGE, + stride=1, + stroke_width=2.0, + **kwargs, + ): super().__init__(**kwargs) self.num_feature_maps = num_feature_maps self.feature_map_height = feature_map_height @@ -29,9 +47,9 @@ def __init__(self, num_feature_maps, feature_map_width, feature_map_height, # Rotate stuff properly # normal_vector = self.feature_maps[0].get_normal_vector() self.rotate( - ThreeDLayer.rotation_angle, - about_point=self.get_center(), - axis=ThreeDLayer.rotation_axis + ThreeDLayer.rotation_angle, + about_point=self.get_center(), + axis=ThreeDLayer.rotation_axis, ) """ self.rotate( @@ -47,35 +65,29 @@ def construct_feature_maps(self): feature_maps = [] for filter_index in range(self.num_feature_maps): rectangle = GriddedRectangle( - color=self.color, + color=self.color, height=self.feature_map_height * self.cell_width, width=self.feature_map_width * self.cell_width, fill_color=self.color, - fill_opacity=0.2, + fill_opacity=0.2, stroke_color=self.color, stroke_width=self.stroke_width, grid_xstep=self.cell_width, grid_ystep=self.cell_width, - grid_stroke_width=self.stroke_width/2, + grid_stroke_width=self.stroke_width / 2, grid_stroke_color=self.color, show_grid_lines=self.show_grid_lines, ) # Move the feature map - rectangle.move_to( - [0, 0, filter_index * self.filter_spacing] - ) + rectangle.move_to([0, 0, filter_index * self.filter_spacing]) rectangle.set_z_index(4) feature_maps.append(rectangle) return VGroup(*feature_maps) def make_forward_pass_animation( - self, - run_time=5, - corner_pulses=False, - layer_args={}, - **kwargs - ): + self, run_time=5, corner_pulses=False, layer_args={}, **kwargs + ): """Convolution forward pass animation""" # Note: most of this animation is done in the Convolution3DToConvolution3D layer print(f"Corner pulses: {corner_pulses}") @@ -84,12 +96,10 @@ def make_forward_pass_animation( passing_flashes = [] for line in self.corner_lines: pulse = ShowPassingFlash( - line.copy() - .set_color(self.pulse_color) - .set_stroke(opacity=1.0), + line.copy().set_color(self.pulse_color).set_stroke(opacity=1.0), time_width=0.5, run_time=run_time, - rate_func=rate_functions.linear + rate_func=rate_functions.linear, ) passing_flashes.append(pulse) @@ -97,7 +107,7 @@ def make_forward_pass_animation( # Make animation group animation_group = AnimationGroup( *passing_flashes, - # 
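# With corner_pulses enabled, every border line of each feature map is
# traced by a linear-rate ShowPassingFlash; otherwise the layer returns
# an empty AnimationGroup and the connective layer does the animating.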
filter_flashes + # filter_flashes ) else: animation_group = AnimationGroup() diff --git a/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py b/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py index b76156f..c3082e2 100644 --- a/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py +++ b/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py @@ -5,19 +5,20 @@ from manim.utils.space_ops import rotation_matrix + class Filters(VGroup): """Group for showing a collection of filters connecting two layers""" def __init__( - self, - input_layer, - output_layer, - line_color=ORANGE, - cell_width=1.0, - stroke_width=2.0, - show_grid_lines=False, - output_feature_map_to_connect=None # None means all at once - ): + self, + input_layer, + output_layer, + line_color=ORANGE, + cell_width=1.0, + stroke_width=2.0, + show_grid_lines=False, + output_feature_map_to_connect=None, # None means all at once + ): super().__init__() self.input_layer = input_layer self.output_layer = output_layer @@ -46,7 +47,7 @@ def make_input_feature_map_rectangles(self): for index, feature_map in enumerate(self.input_layer.feature_maps): rectangle = GriddedRectangle( - width=rectangle_width, + width=rectangle_width, height=rectangle_height, fill_color=filter_color, stroke_color=filter_color, @@ -60,9 +61,9 @@ def make_input_feature_map_rectangles(self): ) # normal_vector = rectangle.get_normal_vector() rectangle.rotate( - ThreeDLayer.rotation_angle, - about_point=rectangle.get_center(), - axis=ThreeDLayer.rotation_axis + ThreeDLayer.rotation_angle, + about_point=rectangle.get_center(), + axis=ThreeDLayer.rotation_axis, ) # Move the rectangle to the corner of the feature map rectangle.next_to( @@ -74,7 +75,7 @@ def make_input_feature_map_rectangles(self): rectangle.set_z_index(5) rectangles.append(rectangle) - + feature_map_rectangles = VGroup(*rectangles) return feature_map_rectangles @@ -93,7 +94,7 @@ def make_output_feature_map_rectangles(self): continue # Make the rectangle rectangle = GriddedRectangle( - width=rectangle_width, + width=rectangle_width, height=rectangle_height, fill_color=filter_color, fill_opacity=0.2, @@ -101,15 +102,15 @@ def make_output_feature_map_rectangles(self): stroke_width=self.stroke_width, grid_xstep=self.cell_width, grid_ystep=self.cell_width, - grid_stroke_width=self.stroke_width/2, + grid_stroke_width=self.stroke_width / 2, grid_stroke_color=filter_color, show_grid_lines=self.show_grid_lines, ) # Rotate the rectangle rectangle.rotate( - ThreeDLayer.rotation_angle, - about_point=rectangle.get_center(), - axis=ThreeDLayer.rotation_axis + ThreeDLayer.rotation_angle, + about_point=rectangle.get_center(), + axis=ThreeDLayer.rotation_axis, ) # Move the rectangle to the corner location rectangle.next_to( @@ -119,7 +120,7 @@ def make_output_feature_map_rectangles(self): # aligned_edge=feature_map.get_corners_dict()["top_left"].get_center() ) rectangles.append(rectangle) - + feature_map_rectangles = VGroup(*rectangles) return feature_map_rectangles @@ -188,74 +189,86 @@ def make_input_to_output_connective_lines(): ) lines.append(line) - return VGroup(*lines) - + return VGroup(*lines) + input_lines = make_input_connective_lines() output_lines = make_output_connective_lines() input_output_lines = make_input_to_output_connective_lines() - connective_lines = VGroup( - *input_lines, - *output_lines, - *input_output_lines - ) - + connective_lines = VGroup(*input_lines, *output_lines, *input_output_lines) + return connective_lines 
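
The Create override that follows defers adding the rectangles and lines until the animation actually plays, to stop the filters from flashing on screen at the start of a Succession. The same trick in isolation, on a hypothetical StagedGroup that is not part of this library:

    from manim import *

    class StagedGroup(VGroup):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.square = Square()  # built now, added to the group later

        @override_animation(Create)
        def _create_override(self, **kwargs):
            # Adding the submobject inside ApplyFunction postpones its
            # first render until this Create animation is played.
            return ApplyFunction(lambda group: group.add(group.square), self)
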
@override_animation(Create) def _create_override(self, **kwargs): """ - NOTE This create override animation - is a workaround to make sure that the filter - does not show up in the scene before the create animation. + NOTE This create override animation + is a workaround to make sure that the filter + does not show up in the scene before the create animation. - Without this override the filters were shown at the beginning - of the neural network forward pass animimation - instead of just when the filters were supposed to appear. - I think this is a bug with Succession in the core - Manim Community Library. + Without this override the filters were shown at the beginning + of the neural network forward pass animimation + instead of just when the filters were supposed to appear. + I think this is a bug with Succession in the core + Manim Community Library. - TODO Fix this + TODO Fix this """ + def add_content(object): - object.add(self.input_rectangles) - object.add(self.connective_lines) - object.add(self.output_rectangles) + object.add(self.input_rectangles) + object.add(self.connective_lines) + object.add(self.output_rectangles) return object - return ApplyFunction( - add_content, - self - ) + return ApplyFunction(add_content, self) return AnimationGroup( Create(self.input_rectangles), Create(self.connective_lines), Create(self.output_rectangles), - lag_ratio=0.0 + lag_ratio=0.0, ) - + def make_pulse_animation(self, shift_amount): """Make animation of the filter pulsing""" passing_flash = ShowPassingFlash( - self.connective_lines.shift(shift_amount).set_stroke_width(self.stroke_width*1.5), + self.connective_lines.shift(shift_amount).set_stroke_width( + self.stroke_width * 1.5 + ), time_width=0.2, color=RED, - z_index=10 + z_index=10, ) return passing_flash + class Convolutional3DToConvolutional3D(ConnectiveLayer, ThreeDLayer): """Feed Forward to Embedding Layer""" + input_class = Convolutional3DLayer output_class = Convolutional3DLayer - def __init__(self, input_layer: Convolutional3DLayer, output_layer: Convolutional3DLayer, - color=ORANGE, filter_opacity=0.3, line_color=ORANGE, - pulse_color=ORANGE, cell_width=0.2, show_grid_lines=True, **kwargs): - super().__init__(input_layer, output_layer, input_class=Convolutional3DLayer, - output_class=Convolutional3DLayer, **kwargs) + def __init__( + self, + input_layer: Convolutional3DLayer, + output_layer: Convolutional3DLayer, + color=ORANGE, + filter_opacity=0.3, + line_color=ORANGE, + pulse_color=ORANGE, + cell_width=0.2, + show_grid_lines=True, + **kwargs, + ): + super().__init__( + input_layer, + output_layer, + input_class=Convolutional3DLayer, + output_class=Convolutional3DLayer, + **kwargs, + ) self.color = color self.filter_color = self.input_layer.filter_color self.filter_width = self.input_layer.filter_width @@ -274,17 +287,14 @@ def __init__(self, input_layer: Convolutional3DLayer, output_layer: Convolutiona def get_rotated_shift_vectors(self): """ - Rotates the shift vectors + Rotates the shift vectors """ # Make base shift vectors right_shift = np.array([self.input_layer.cell_width, 0, 0]) down_shift = np.array([0, -self.input_layer.cell_width, 0]) # Make rotation matrix - rot_mat = rotation_matrix( - ThreeDLayer.rotation_angle, - ThreeDLayer.rotation_axis - ) - # Rotate the vectors + rot_mat = rotation_matrix(ThreeDLayer.rotation_angle, ThreeDLayer.rotation_axis) + # Rotate the vectors right_shift = np.dot(right_shift, rot_mat.T) down_shift = np.dot(down_shift, rot_mat.T) @@ -295,58 +305,44 @@ def animate_filters_all_at_once(self, 
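# NOTE: the incoming `filters` argument is immediately shadowed by the
# Filters(...) group constructed inside the method body below.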
filters): animations = [] # Make filters filters = Filters( - self.input_layer, + self.input_layer, self.output_layer, line_color=self.color, cell_width=self.cell_width, show_grid_lines=self.show_grid_lines, - output_feature_map_to_connect=None # None means all at once - ) - animations.append( - Create(filters) + output_feature_map_to_connect=None, # None means all at once ) + animations.append(Create(filters)) # Get the rotated shift vectors right_shift, down_shift = self.get_rotated_shift_vectors() left_shift = -1 * right_shift - # Make the animation + # Make the animation num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride) num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride) for y_move in range(num_y_moves): # Go right num_x_moves for x_move in range(num_x_moves): # Shift right - shift_animation = ApplyMethod( - filters.shift, - self.stride * right_shift - ) + shift_animation = ApplyMethod(filters.shift, self.stride * right_shift) # shift_animation = self.animate.shift(right_shift) animations.append(shift_animation) - + # Go back left num_x_moves and down one - shift_amount = self.stride * num_x_moves * left_shift + self.stride * down_shift - # Make the animation - shift_animation = ApplyMethod( - filters.shift, - shift_amount + shift_amount = ( + self.stride * num_x_moves * left_shift + self.stride * down_shift ) + # Make the animation + shift_animation = ApplyMethod(filters.shift, shift_amount) animations.append(shift_animation) # Do last row move right for x_move in range(num_x_moves): # Shift right - shift_animation = ApplyMethod( - filters.shift, - self.stride * right_shift - ) + shift_animation = ApplyMethod(filters.shift, self.stride * right_shift) # shift_animation = self.animate.shift(right_shift) animations.append(shift_animation) # Remove the filters - animations.append( - FadeOut(filters) - ) - return Succession( - *animations, - lag_ratio=1.0 - ) + animations.append(FadeOut(filters)) + return Succession(*animations, lag_ratio=1.0) def animate_filters_one_at_a_time(self): """Animates each of the filters one at a time""" @@ -355,22 +351,24 @@ def animate_filters_one_at_a_time(self): for filter_index in range(len(output_feature_maps)): # Make filters filters = Filters( - self.input_layer, + self.input_layer, self.output_layer, line_color=self.color, cell_width=self.cell_width, show_grid_lines=self.show_grid_lines, - output_feature_map_to_connect=filter_index # None means all at once - ) - animations.append( - Create(filters) + output_feature_map_to_connect=filter_index, # None means all at once ) + animations.append(Create(filters)) # Get the rotated shift vectors right_shift, down_shift = self.get_rotated_shift_vectors() left_shift = -1 * right_shift - # Make the animation - num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride) - num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride) + # Make the animation + num_y_moves = int( + (self.feature_map_height - self.filter_height) / self.stride + ) + num_x_moves = int( + (self.feature_map_width - self.filter_width) / self.stride + ) for y_move in range(num_y_moves): # Go right num_x_moves for x_move in range(num_x_moves): @@ -381,48 +379,36 @@ def animate_filters_one_at_a_time(self): ) animations.append(pulse_animation) """ - z_index_animation = ApplyMethod( - filters.set_z_index, - 5 - ) + z_index_animation = ApplyMethod(filters.set_z_index, 5) animations.append(z_index_animation) # Shift right shift_animation = ApplyMethod( - 
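# ApplyMethod(mobject.method, *args) animates calling the bound method,
# so each step slides the filter group one stride further to the right.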
filters.shift, - self.stride * right_shift + filters.shift, self.stride * right_shift ) # shift_animation = self.animate.shift(right_shift) animations.append(shift_animation) - + # Go back left num_x_moves and down one - shift_amount = self.stride * num_x_moves * left_shift + self.stride * down_shift - # Make the animation - shift_animation = ApplyMethod( - filters.shift, - shift_amount + shift_amount = ( + self.stride * num_x_moves * left_shift + self.stride * down_shift ) + # Make the animation + shift_animation = ApplyMethod(filters.shift, shift_amount) animations.append(shift_animation) # Do last row move right for x_move in range(num_x_moves): # Shift right - shift_animation = ApplyMethod( - filters.shift, - self.stride * right_shift - ) + shift_animation = ApplyMethod(filters.shift, self.stride * right_shift) # shift_animation = self.animate.shift(right_shift) animations.append(shift_animation) # Remove the filters - animations.append( - FadeOut(filters) - ) + animations.append(FadeOut(filters)) - return Succession( - *animations, - lag_ratio=1.0 - ) + return Succession(*animations, lag_ratio=1.0) - def make_forward_pass_animation(self, layer_args={}, - all_filters_at_once=False, run_time=10.5, **kwargs): + def make_forward_pass_animation( + self, layer_args={}, all_filters_at_once=False, run_time=10.5, **kwargs + ): """Forward pass animation from conv2d to conv2d""" print(f"All filters at once: {all_filters_at_once}") # Make filter shifting animations diff --git a/manim_ml/neural_network/layers/convolutional_3d_to_feed_forward.py b/manim_ml/neural_network/layers/convolutional_3d_to_feed_forward.py index 522fcda..539ac06 100644 --- a/manim_ml/neural_network/layers/convolutional_3d_to_feed_forward.py +++ b/manim_ml/neural_network/layers/convolutional_3d_to_feed_forward.py @@ -3,17 +3,29 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.convolutional3d import Convolutional3DLayer + class Convolutional3DToFeedForward(ConnectiveLayer, ThreeDLayer): """Feed Forward to Embedding Layer""" + input_class = Convolutional3DLayer output_class = FeedForwardLayer - def __init__(self, input_layer: Convolutional3DLayer, output_layer: FeedForwardLayer, - passing_flash_color=ORANGE, **kwargs): - super().__init__(input_layer, output_layer, input_class=Convolutional3DLayer, - output_class=Convolutional3DLayer, **kwargs) + def __init__( + self, + input_layer: Convolutional3DLayer, + output_layer: FeedForwardLayer, + passing_flash_color=ORANGE, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=Convolutional3DLayer, + output_class=Convolutional3DLayer, + **kwargs + ) self.passing_flash_color = passing_flash_color - + def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): """Forward pass animation from conv2d to conv2d""" animations = [] @@ -29,9 +41,8 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): line = Line(corner, node, stroke_width=1.0) line.set_z_index(self.output_layer.node_group.get_z_index()) anim = ShowPassingFlash( - line.set_color(self.passing_flash_color), - time_width=0.2 + line.set_color(self.passing_flash_color), time_width=0.2 ) animations.append(anim) - return AnimationGroup(*animations) \ No newline at end of file + return AnimationGroup(*animations) diff --git a/manim_ml/neural_network/layers/embedding.py b/manim_ml/neural_network/layers/embedding.py index e5fabf2..8a7c3fa 100644 --- a/manim_ml/neural_network/layers/embedding.py +++ 
b/manim_ml/neural_network/layers/embedding.py @@ -2,12 +2,19 @@ from manim_ml.probability import GaussianDistribution from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer + class EmbeddingLayer(VGroupNeuralNetworkLayer): """NeuralNetwork embedding object that can show probability distributions""" - def __init__(self, point_radius=0.02, mean = np.array([0, 0]), - covariance=np.array([[1.0, 0], [0, 1.0]]), dist_theme="gaussian", - paired_query_mode=False, **kwargs): + def __init__( + self, + point_radius=0.02, + mean=np.array([0, 0]), + covariance=np.array([[1.0, 0], [0, 1.0]]), + dist_theme="gaussian", + paired_query_mode=False, + **kwargs + ): super(VGroupNeuralNetworkLayer, self).__init__(**kwargs) self.gaussian_distributions = VGroup() self.add(self.gaussian_distributions) @@ -20,14 +27,8 @@ def __init__(self, point_radius=0.02, mean = np.array([0, 0]), y_length=0.8, x_range=(-1.4, 1.4), y_range=(-1.8, 1.8), - x_axis_config={ - "include_ticks": False, - "stroke_width": 0.0 - }, - y_axis_config={ - "include_ticks": False, - "stroke_width": 0.0 - } + x_axis_config={"include_ticks": False, "stroke_width": 0.0}, + y_axis_config={"include_ticks": False, "stroke_width": 0.0}, ) self.add(self.axes) self.axes.move_to(self.get_center()) @@ -35,7 +36,9 @@ def __init__(self, point_radius=0.02, mean = np.array([0, 0]), self.point_cloud = self.construct_gaussian_point_cloud(mean, covariance) self.add(self.point_cloud) # Make latent distribution - self.latent_distribution = GaussianDistribution(self.axes, mean=mean, cov=covariance) # Use defaults + self.latent_distribution = GaussianDistribution( + self.axes, mean=mean, cov=covariance + ) # Use defaults def add_gaussian_distribution(self, gaussian_distribution): """Adds given GaussianDistribution to the list""" @@ -57,15 +60,16 @@ def sample_point_location_from_distribution(self): point = np.random.multivariate_normal(mean, cov) # Make dot at correct location location = self.axes.coords_to_point(point[0], point[1]) - + return location def get_distribution_location(self): """Returns mean of latent distribution in axes frame""" return self.axes.coords_to_point(self.latent_distribution.mean) - def construct_gaussian_point_cloud(self, mean, covariance, point_color=WHITE, - num_points=400): + def construct_gaussian_point_cloud( + self, mean, covariance, point_color=WHITE, num_points=400 + ): """Plots points sampled from a Gaussian with the given mean and covariance""" # Sample points from a Gaussian np.random.seed(5) @@ -74,7 +78,7 @@ def construct_gaussian_point_cloud(self, mean, covariance, point_color=WHITE, point_dots = VGroup() for point in points: point_location = self.axes.coords_to_point(*point) - dot = Dot(point_location, color=point_color, radius=self.point_radius/2) + dot = Dot(point_location, color=point_color, radius=self.point_radius / 2) dot.set_z_index(-1) point_dots.add(dot) @@ -82,36 +86,27 @@ def construct_gaussian_point_cloud(self, mean, covariance, point_color=WHITE, def make_forward_pass_animation(self, layer_args={}, **kwargs): """Forward pass animation""" - animations = [] + animations = [] if "triplet_args" in layer_args: triplet_args = layer_args["triplet_args"] positive_dist_args = triplet_args["positive_dist"] negative_dist_args = triplet_args["negative_dist"] anchor_dist_args = triplet_args["anchor_dist"] # Create each dist - anchor_dist = GaussianDistribution( - self.axes, - **anchor_dist_args - ) + anchor_dist = GaussianDistribution(self.axes, **anchor_dist_args) 
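# The triplet layer_args are expected to have the shape
#   {"triplet_args": {"anchor_dist": {...}, "positive_dist": {...},
#                     "negative_dist": {...}}}
# where each inner dict holds GaussianDistribution kwargs (e.g. mean, cov).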
animations.append(Create(anchor_dist)) - positive_dist = GaussianDistribution( - self.axes, - **positive_dist_args - ) + positive_dist = GaussianDistribution(self.axes, **positive_dist_args) animations.append(Create(positive_dist)) - negative_dist = GaussianDistribution( - self.axes, - **negative_dist_args - ) + negative_dist = GaussianDistribution(self.axes, **negative_dist_args) animations.append(Create(negative_dist)) # Draw edges in between anchor and positive, anchor and negative anchor_positive = Line( anchor_dist.get_center(), positive_dist.get_center(), color=GOLD, - stroke_width=DEFAULT_STROKE_WIDTH/2 + stroke_width=DEFAULT_STROKE_WIDTH / 2, ) anchor_positive.set_z_index(3) animations.append(Create(anchor_positive)) @@ -120,7 +115,7 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): anchor_dist.get_center(), negative_dist.get_center(), color=GOLD, - stroke_width=DEFAULT_STROKE_WIDTH/2 + stroke_width=DEFAULT_STROKE_WIDTH / 2, ) anchor_negative.set_z_index(3) @@ -132,14 +127,13 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): if "scale_factor" in layer_args: scale_factor = layer_args["scale_factor"] self.latent_distribution = GaussianDistribution( - self.axes, - **layer_args["dist_args"] + self.axes, **layer_args["dist_args"] ).scale(scale_factor) else: # Make ellipse object corresponding to the latent distribution # self.latent_distribution = GaussianDistribution( - # self.axes, - # dist_theme=self.dist_theme, + # self.axes, + # dist_theme=self.dist_theme, # cov=np.array([[0.8, 0], [0.0, 0.8]]) # ) pass @@ -153,15 +147,9 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): positive_dist_args = layer_args["positive_dist_args"] negative_dist_args = layer_args["negative_dist_args"] # Handle logic for embedding a paired query into the embedding layer - positive_dist = GaussianDistribution( - self.axes, - **positive_dist_args - ) + positive_dist = GaussianDistribution(self.axes, **positive_dist_args) self.gaussian_distributions.add(positive_dist) - negative_dist = GaussianDistribution( - self.axes, - **negative_dist_args - ) + negative_dist = GaussianDistribution(self.axes, **negative_dist_args) self.gaussian_distributions.add(negative_dist) animations.append(Create(positive_dist)) @@ -182,8 +170,8 @@ def _create_override(self, **kwargs): return point_animation -class NeuralNetworkEmbeddingTestScene(Scene): +class NeuralNetworkEmbeddingTestScene(Scene): def construct(self): nne = EmbeddingLayer() mean = np.array([0, 0]) @@ -195,4 +183,4 @@ def construct(self): gaussian = nne.construct_gaussian_distribution(mean, cov) nne.add(gaussian) - self.add(nne) \ No newline at end of file + self.add(nne) diff --git a/manim_ml/neural_network/layers/embedding_to_feed_forward.py b/manim_ml/neural_network/layers/embedding_to_feed_forward.py index 9053c78..1dcb4a5 100644 --- a/manim_ml/neural_network/layers/embedding_to_feed_forward.py +++ b/manim_ml/neural_network/layers/embedding_to_feed_forward.py @@ -3,15 +3,28 @@ from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer from manim_ml.neural_network.layers.embedding import EmbeddingLayer + class EmbeddingToFeedForward(ConnectiveLayer): """Feed Forward to Embedding Layer""" + input_class = EmbeddingLayer output_class = FeedForwardLayer - def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03, - **kwargs): - super().__init__(input_layer, output_layer, input_class=EmbeddingLayer, output_class=FeedForwardLayer, - **kwargs) + def __init__( + self, + 
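# Connective layers are built from the two layers they join; the
# input_class/output_class pair is fixed in the super().__init__ call.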
input_layer, + output_layer, + animation_dot_color=RED, + dot_radius=0.03, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=EmbeddingLayer, + output_class=FeedForwardLayer, + **kwargs + ) self.feed_forward_layer = output_layer self.embedding_layer = input_layer self.animation_dot_color = animation_dot_color @@ -26,7 +39,9 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): # Move the dots to the centers of each of the nodes in the FeedForwardLayer dots = [] for node in self.feed_forward_layer.node_group: - new_dot = Dot(location, radius=self.dot_radius, color=self.animation_dot_color) + new_dot = Dot( + location, radius=self.dot_radius, color=self.animation_dot_color + ) per_node_succession = Succession( Create(new_dot), new_dot.animate.move_to(node.get_center()), @@ -46,4 +61,3 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): @override_animation(Create) def _create_override(self, **kwargs): return AnimationGroup() - \ No newline at end of file diff --git a/manim_ml/neural_network/layers/feed_forward.py b/manim_ml/neural_network/layers/feed_forward.py index 4beb9bd..6240b12 100644 --- a/manim_ml/neural_network/layers/feed_forward.py +++ b/manim_ml/neural_network/layers/feed_forward.py @@ -1,13 +1,25 @@ from manim import * from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer + class FeedForwardLayer(VGroupNeuralNetworkLayer): """Handles rendering a layer for a neural network""" - def __init__(self, num_nodes, layer_buffer=SMALL_BUFF/2, node_radius=0.08, - node_color=BLUE, node_outline_color=WHITE, rectangle_color=WHITE, - node_spacing=0.3, rectangle_fill_color=BLACK, node_stroke_width=2.0, - rectangle_stroke_width=2.0, animation_dot_color=RED, **kwargs): + def __init__( + self, + num_nodes, + layer_buffer=SMALL_BUFF / 2, + node_radius=0.08, + node_color=BLUE, + node_outline_color=WHITE, + rectangle_color=WHITE, + node_spacing=0.3, + rectangle_fill_color=BLACK, + node_stroke_width=2.0, + rectangle_stroke_width=2.0, + animation_dot_color=RED, + **kwargs + ): super(VGroupNeuralNetworkLayer, self).__init__(**kwargs) self.num_nodes = num_nodes self.layer_buffer = layer_buffer @@ -29,8 +41,11 @@ def _construct_neural_network_layer(self): """Creates the neural network layer""" # Add Nodes for node_number in range(self.num_nodes): - node_object = Circle(radius=self.node_radius, color=self.node_color, - stroke_width=self.node_stroke_width) + node_object = Circle( + radius=self.node_radius, + color=self.node_color, + stroke_width=self.node_stroke_width, + ) self.node_group.add(node_object) # Space the nodes # Assumes Vertical orientation @@ -38,17 +53,24 @@ def _construct_neural_network_layer(self): location = node_index * self.node_spacing node_object.move_to([0, location, 0]) # Create Surrounding Rectangle - self.surrounding_rectangle = SurroundingRectangle(self.node_group, color=self.rectangle_color, - fill_color=self.rectangle_fill_color, fill_opacity=1.0, - buff=self.layer_buffer, stroke_width=self.rectangle_stroke_width) - self.surrounding_rectangle.set_z_index(1) + self.surrounding_rectangle = SurroundingRectangle( + self.node_group, + color=self.rectangle_color, + fill_color=self.rectangle_fill_color, + fill_opacity=1.0, + buff=self.layer_buffer, + stroke_width=self.rectangle_stroke_width, + ) + self.surrounding_rectangle.set_z_index(1) # Add the objects to the class self.add(self.surrounding_rectangle, self.node_group) def make_forward_pass_animation(self, layer_args={}, 
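# layer_args uses a shared mutable default ({}); it is never mutated
# here, but layer_args=None with an in-body default is the safer idiom.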
**kwargs): # make highlight animation succession = Succession( - ApplyMethod(self.node_group.set_color, self.animation_dot_color, run_time=0.25), + ApplyMethod( + self.node_group.set_color, self.animation_dot_color, run_time=0.25 + ), Wait(1.0), ApplyMethod(self.node_group.set_color, self.node_color, run_time=0.25), ) @@ -65,4 +87,4 @@ def _create_override(self, **kwargs): animations.append(Create(node)) animation_group = AnimationGroup(*animations, lag_ratio=0.0) - return animation_group \ No newline at end of file + return animation_group diff --git a/manim_ml/neural_network/layers/feed_forward_to_embedding.py b/manim_ml/neural_network/layers/feed_forward_to_embedding.py index 8e3da57..bbbb521 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_embedding.py +++ b/manim_ml/neural_network/layers/feed_forward_to_embedding.py @@ -3,15 +3,28 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer + class FeedForwardToEmbedding(ConnectiveLayer): """Feed Forward to Embedding Layer""" + input_class = FeedForwardLayer output_class = EmbeddingLayer - def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03, - **kwargs): - super().__init__(input_layer, output_layer, input_class=FeedForwardLayer, output_class=EmbeddingLayer, - **kwargs) + def __init__( + self, + input_layer, + output_layer, + animation_dot_color=RED, + dot_radius=0.03, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=FeedForwardLayer, + output_class=EmbeddingLayer, + **kwargs + ) self.feed_forward_layer = input_layer self.embedding_layer = output_layer self.animation_dot_color = animation_dot_color @@ -27,7 +40,11 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): # Move the dots to the centers of each of the nodes in the FeedForwardLayer dots = [] for node in self.feed_forward_layer.node_group: - new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color) + new_dot = Dot( + node.get_center(), + radius=self.dot_radius, + color=self.animation_dot_color, + ) per_node_succession = Succession( Create(new_dot), new_dot.animate.move_to(location), @@ -50,4 +67,3 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): @override_animation(Create) def _create_override(self, **kwargs): return AnimationGroup() - diff --git a/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py b/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py index ff8516e..b4c6ded 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py +++ b/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py @@ -5,16 +5,32 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer + class FeedForwardToFeedForward(ConnectiveLayer): """Layer for connecting FeedForward layer to FeedForwardLayer""" + input_class = FeedForwardLayer output_class = FeedForwardLayer - def __init__(self, input_layer, output_layer, passing_flash=True, - dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE, - edge_width=1.5, camera=None, **kwargs): - super().__init__(input_layer, output_layer, input_class=FeedForwardLayer, output_class=FeedForwardLayer, - **kwargs) + def __init__( + self, + input_layer, + output_layer, + passing_flash=True, + dot_radius=0.05, + animation_dot_color=RED, + edge_color=WHITE, + edge_width=1.5, + 
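# passing_flash=True renders the forward pass as ShowPassingFlash pulses
# along the edges; with False, dots travel each edge via MoveAlongPath.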
camera=None, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=FeedForwardLayer, + output_class=FeedForwardLayer, + **kwargs + ) self.passing_flash = passing_flash self.edge_color = edge_color self.dot_radius = dot_radius @@ -29,8 +45,12 @@ def construct_edges(self): edges = [] for node_i in self.input_layer.node_group: for node_j in self.output_layer.node_group: - line = Line(node_i.get_center(), node_j.get_center(), - color=self.edge_color, stroke_width=self.edge_width) + line = Line( + node_i.get_center(), + node_j.get_center(), + color=self.edge_color, + stroke_width=self.edge_width, + ) edges.append(line) edges = VGroup(*edges) @@ -39,7 +59,7 @@ def construct_edges(self): @override_animation(FadeOut) def _fadeout_animation(self): animations = [] - + for edge in self.edges: animations.append(FadeOut(edge)) @@ -53,25 +73,19 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1, **kwargs): dots = [] for edge in self.edges: dot = Dot( - color=self.animation_dot_color, - fill_opacity=1.0, - radius=self.dot_radius - ) + color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius + ) # Add to dots group dots.append(dot) # Make the animation if self.passing_flash: copy_edge = edge.copy() anim = ShowPassingFlash( - copy_edge.set_color(self.animation_dot_color), - time_width=0.2 + copy_edge.set_color(self.animation_dot_color), time_width=0.2 ) else: anim = MoveAlongPath( - dot, - edge, - run_time=run_time, - rate_function=sigmoid + dot, edge, run_time=run_time, rate_function=sigmoid ) path_animations.append(anim) @@ -83,12 +97,7 @@ def make_forward_pass_animation(self, layer_args={}, run_time=1, **kwargs): return path_animations - def modify_edge_colors( - self, - colors=None, - magnitudes=None, - color_scheme="inferno" - ): + def modify_edge_colors(self, colors=None, magnitudes=None, color_scheme="inferno"): """Changes the colors of edges""" # TODO implement pass diff --git a/manim_ml/neural_network/layers/feed_forward_to_image.py b/manim_ml/neural_network/layers/feed_forward_to_image.py index ef6b940..5df1bce 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_image.py +++ b/manim_ml/neural_network/layers/feed_forward_to_image.py @@ -3,18 +3,31 @@ from manim_ml.neural_network.layers.image import ImageLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer + class FeedForwardToImage(ConnectiveLayer): """Image Layer to FeedForward layer""" + input_class = FeedForwardLayer output_class = ImageLayer - def __init__(self, input_layer, output_layer, animation_dot_color=RED, - dot_radius=0.05, **kwargs): - super().__init__(input_layer, output_layer, input_class=FeedForwardLayer, output_class=ImageLayer, - **kwargs) + def __init__( + self, + input_layer, + output_layer, + animation_dot_color=RED, + dot_radius=0.05, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=FeedForwardLayer, + output_class=ImageLayer, + **kwargs + ) self.animation_dot_color = animation_dot_color self.dot_radius = dot_radius - + self.feed_forward_layer = input_layer self.image_layer = output_layer @@ -23,9 +36,13 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): animations = [] image_mobject = self.image_layer.image_mobject # Move the dots to the centers of each of the nodes in the FeedForwardLayer - image_location = image_mobject.get_center() + image_location = image_mobject.get_center() for node in self.feed_forward_layer.node_group: - new_dot = Dot(node.get_center(), radius=self.dot_radius, 
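# One dot is spawned per feed-forward node, then animated from the
# node's center to the image location by the Succession built below.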
color=self.animation_dot_color) + new_dot = Dot( + node.get_center(), + radius=self.dot_radius, + color=self.animation_dot_color, + ) per_node_succession = Succession( Create(new_dot), new_dot.animate.move_to(image_location), @@ -37,4 +54,4 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): @override_animation(Create) def _create_override(self): - return AnimationGroup() \ No newline at end of file + return AnimationGroup() diff --git a/manim_ml/neural_network/layers/feed_forward_to_vector.py b/manim_ml/neural_network/layers/feed_forward_to_vector.py index a61fb13..12ca76a 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_vector.py +++ b/manim_ml/neural_network/layers/feed_forward_to_vector.py @@ -3,18 +3,31 @@ from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer from manim_ml.neural_network.layers.vector import VectorLayer + class FeedForwardToVector(ConnectiveLayer): """Image Layer to FeedForward layer""" + input_class = FeedForwardLayer output_class = VectorLayer - def __init__(self, input_layer, output_layer, animation_dot_color=RED, - dot_radius=0.05, **kwargs): - super().__init__(input_layer, output_layer, input_class=FeedForwardLayer, output_class=VectorLayer, - **kwargs) + def __init__( + self, + input_layer, + output_layer, + animation_dot_color=RED, + dot_radius=0.05, + **kwargs + ): + super().__init__( + input_layer, + output_layer, + input_class=FeedForwardLayer, + output_class=VectorLayer, + **kwargs + ) self.animation_dot_color = animation_dot_color self.dot_radius = dot_radius - + self.feed_forward_layer = input_layer self.vector_layer = output_layer @@ -22,9 +35,13 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): """Makes dots diverge from the given location and move to the feed forward nodes decoder""" animations = [] # Move the dots to the centers of each of the nodes in the FeedForwardLayer - destination = self.vector_layer.get_center() + destination = self.vector_layer.get_center() for node in self.feed_forward_layer.node_group: - new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color) + new_dot = Dot( + node.get_center(), + radius=self.dot_radius, + color=self.animation_dot_color, + ) per_node_succession = Succession( Create(new_dot), new_dot.animate.move_to(destination), @@ -36,4 +53,4 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs): @override_animation(Create) def _create_override(self): - return AnimationGroup() \ No newline at end of file + return AnimationGroup() diff --git a/manim_ml/neural_network/layers/image.py b/manim_ml/neural_network/layers/image.py index 03cc77c..21ebfd9 100644 --- a/manim_ml/neural_network/layers/image.py +++ b/manim_ml/neural_network/layers/image.py @@ -4,6 +4,7 @@ from PIL import Image + class ImageLayer(NeuralNetworkLayer): """Single Image Layer for Neural Network""" @@ -18,7 +19,9 @@ def __init__(self, numpy_image, height=1.5, show_image_on_create=True, **kwargs) elif len(np.shape(self.numpy_image)) == 3: # Assumed RGB self.num_channels = 3 - self.image_mobject = ImageMobject(self.numpy_image).scale_to_fit_height(height) + self.image_mobject = ImageMobject(self.numpy_image).scale_to_fit_height( + height + ) self.add(self.image_mobject) @classmethod @@ -44,11 +47,11 @@ def _create_override(self, **kwargs): def make_forward_pass_animation(self, layer_args={}, **kwargs): return AnimationGroup() - + # def move_to(self, location): # """Override of move to""" # self.image_mobject.move_to(location) - + def get_right(self): 
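# ImageLayer delegates its geometry (get_right, width, height) to the
# wrapped ImageMobject so connective layers can align against the image.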
"""Override get right""" return self.image_mobject.get_right() @@ -59,4 +62,4 @@ def width(self): @property def height(self): - return self.image_mobject.height \ No newline at end of file + return self.image_mobject.height diff --git a/manim_ml/neural_network/layers/image_to_convolutional3d.py b/manim_ml/neural_network/layers/image_to_convolutional3d.py index fa03714..b70bd24 100644 --- a/manim_ml/neural_network/layers/image_to_convolutional3d.py +++ b/manim_ml/neural_network/layers/image_to_convolutional3d.py @@ -3,25 +3,27 @@ from manim import * from manim_ml.neural_network.layers.convolutional3d import Convolutional3DLayer from manim_ml.neural_network.layers.image import ImageLayer -from manim_ml.neural_network.layers.parent_layers import ThreeDLayer, VGroupNeuralNetworkLayer +from manim_ml.neural_network.layers.parent_layers import ( + ThreeDLayer, + VGroupNeuralNetworkLayer, +) from manim_ml.gridded_rectangle import GriddedRectangle + class ImageToConvolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): """Handles rendering a convolutional layer for a nn""" + input_class = ImageLayer output_class = Convolutional3DLayer - def __init__(self, input_layer: ImageLayer, output_layer: Convolutional3DLayer, **kwargs): + def __init__( + self, input_layer: ImageLayer, output_layer: Convolutional3DLayer, **kwargs + ): super().__init__(input_layer, output_layer, **kwargs) self.input_layer = input_layer self.output_layer = output_layer - def make_forward_pass_animation( - self, - run_time=5, - layer_args={}, - **kwargs - ): + def make_forward_pass_animation(self, run_time=5, layer_args={}, **kwargs): """Maps image to convolutional layer""" # Transform the image from the input layer to the num_image_channels = self.input_layer.num_channels @@ -30,7 +32,9 @@ def make_forward_pass_animation( elif num_image_channels == 1: return self.grayscale_image_animation() else: - raise Exception(f"Unrecognized number of image channels: {num_image_channels}") + raise Exception( + f"Unrecognized number of image channels: {num_image_channels}" + ) def rbg_image_animation(self): """Handles animation for 3 channel image""" @@ -53,7 +57,7 @@ def grayscale_image_animation(self): ThreeDLayer.rotation_angle, ThreeDLayer.rotation_axis, image_mobject.get_center(), - run_time=0.5 + run_time=0.5, ) """ x_rotation = ApplyMethod( @@ -72,25 +76,14 @@ def grayscale_image_animation(self): ) """ # Set opacity - set_opacity = ApplyMethod( - image_mobject.set_opacity, - 0.2, - run_time=0.5 - ) - # Scale the max of width or height to the + set_opacity = ApplyMethod(image_mobject.set_opacity, 0.2, run_time=0.5) + # Scale the max of width or height to the # width of the feature_map max_width_height = max(image_mobject.width, image_mobject.height) scale_factor = target_feature_map.rectangle_width / max_width_height - scale_image = ApplyMethod( - image_mobject.scale, - scale_factor, - run_time=0.5 - ) + scale_image = ApplyMethod(image_mobject.scale, scale_factor, run_time=0.5) # Move the image - move_image = ApplyMethod( - image_mobject.move_to, - target_feature_map - ) + move_image = ApplyMethod(image_mobject.move_to, target_feature_map) # Compose the animations animation = Succession( rotation, @@ -99,10 +92,10 @@ def grayscale_image_animation(self): move_image, ) return animation - + def scale(self, scale_factor, **kwargs): super().scale(scale_factor, **kwargs) @override_animation(Create) def _create_override(self, **kwargs): - return AnimationGroup() \ No newline at end of file + return AnimationGroup() diff --git 
diff --git a/manim_ml/neural_network/layers/image_to_feed_forward.py b/manim_ml/neural_network/layers/image_to_feed_forward.py
index 59d838f..62687ee 100644
--- a/manim_ml/neural_network/layers/image_to_feed_forward.py
+++ b/manim_ml/neural_network/layers/image_to_feed_forward.py
@@ -3,15 +3,28 @@
 from manim_ml.neural_network.layers.image import ImageLayer
 from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer
 
+
 class ImageToFeedForward(ConnectiveLayer):
     """Image Layer to FeedForward layer"""
+
     input_class = ImageLayer
     output_class = FeedForwardLayer
 
-    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
-                 dot_radius=0.05, **kwargs):
-        super().__init__(input_layer, output_layer, input_class=ImageLayer, output_class=FeedForwardLayer,
-                         **kwargs)
+    def __init__(
+        self,
+        input_layer,
+        output_layer,
+        animation_dot_color=RED,
+        dot_radius=0.05,
+        **kwargs
+    ):
+        super().__init__(
+            input_layer,
+            output_layer,
+            input_class=ImageLayer,
+            output_class=FeedForwardLayer,
+            **kwargs
+        )
         self.animation_dot_color = animation_dot_color
         self.dot_radius = dot_radius
 
@@ -24,19 +37,21 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs):
         dots = []
         image_mobject = self.image_layer.image_mobject
         # Move the dots to the centers of each of the nodes in the FeedForwardLayer
-        image_location = image_mobject.get_center()
+        image_location = image_mobject.get_center()
         for node in self.feed_forward_layer.node_group:
-            new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
+            new_dot = Dot(
+                image_location, radius=self.dot_radius, color=self.animation_dot_color
+            )
             per_node_succession = Succession(
                 Create(new_dot),
                 new_dot.animate.move_to(node.get_center()),
             )
             animations.append(per_node_succession)
             dots.append(new_dot)
-
+
         animation_group = AnimationGroup(*animations)
         return animation_group
 
     @override_animation(Create)
     def _create_override(self):
-        return AnimationGroup()
\ No newline at end of file
+        return AnimationGroup()
diff --git a/manim_ml/neural_network/layers/paired_query.py b/manim_ml/neural_network/layers/paired_query.py
index 193f406..b4e1991 100644
--- a/manim_ml/neural_network/layers/paired_query.py
+++ b/manim_ml/neural_network/layers/paired_query.py
@@ -3,11 +3,13 @@
 from manim_ml.image import GrayscaleImageMobject, LabeledColorImage
 import numpy as np
 
+
 class PairedQueryLayer(NeuralNetworkLayer):
     """Paired Query Layer"""
 
-    def __init__(self, positive, negative, stroke_width=5, font_size=18,
-                 spacing=0.5, **kwargs):
+    def __init__(
+        self, positive, negative, stroke_width=5, font_size=18, spacing=0.5, **kwargs
+    ):
         super().__init__(**kwargs)
         self.positive = positive
         self.negative = negative
@@ -19,7 +21,7 @@ def __init__(self, positive, negative, stroke_width=5, font_size=18,
         self.assets = self.make_assets()
         self.add(self.assets)
         self.add(self.title)
-
+
     @classmethod
     def from_paths(cls, positive_path, negative_path, grayscale=True, **kwargs):
         """Creates a query using the paths"""
@@ -37,23 +39,23 @@ def from_paths(cls, positive_path, negative_path, grayscale=True, **kwargs):
 
     def make_assets(self):
         """
-        Constructs the assets needed for a query layer
+        Constructs the assets needed for a query layer
         """
         # Handle positive
         positive_group = LabeledColorImage(
-            self.positive,
+            self.positive,
             color=BLUE,
             label="Positive",
             font_size=self.font_size,
-            stroke_width=self.stroke_width
+            stroke_width=self.stroke_width,
         )
         # Handle negative
         negative_group = LabeledColorImage(
-            self.negative,
+            self.negative,
             color=RED,
-            label="Negative",
+            label="Negative",
             font_size=self.font_size,
-            stroke_width=self.stroke_width
+            stroke_width=self.stroke_width,
         )
         # Distribute the groups uniformly vertically
         assets = Group(positive_group, negative_group)
@@ -68,4 +70,4 @@ def _create_override(self):
 
     def make_forward_pass_animation(self, layer_args={}, **kwargs):
         """Forward pass for query"""
-        return AnimationGroup()
\ No newline at end of file
+        return AnimationGroup()
diff --git a/manim_ml/neural_network/layers/paired_query_to_feed_forward.py b/manim_ml/neural_network/layers/paired_query_to_feed_forward.py
index f1d8775..a3cbc81 100644
--- a/manim_ml/neural_network/layers/paired_query_to_feed_forward.py
+++ b/manim_ml/neural_network/layers/paired_query_to_feed_forward.py
@@ -3,14 +3,28 @@
 from manim_ml.neural_network.layers.paired_query import PairedQueryLayer
 from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer
 
+
 class PairedQueryToFeedForward(ConnectiveLayer):
     """PairedQuery layer to FeedForward layer"""
+
     input_class = PairedQueryLayer
     output_class = FeedForwardLayer
 
-    def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.02, **kwargs):
-        super().__init__(input_layer, output_layer, input_class=PairedQueryLayer, output_class=FeedForwardLayer,
-                         **kwargs)
+    def __init__(
+        self,
+        input_layer,
+        output_layer,
+        animation_dot_color=RED,
+        dot_radius=0.02,
+        **kwargs
+    ):
+        super().__init__(
+            input_layer,
+            output_layer,
+            input_class=PairedQueryLayer,
+            output_class=FeedForwardLayer,
+            **kwargs
+        )
         self.animation_dot_color = animation_dot_color
         self.dot_radius = dot_radius
 
@@ -26,22 +40,26 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs):
         image_animations = []
         dots = []
         # Move dots from each image to the centers of each of the nodes in the FeedForwardLayer
-        image_location = image_mobject.get_center()
+        image_location = image_mobject.get_center()
         for node in self.feed_forward_layer.node_group:
-            new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
+            new_dot = Dot(
+                image_location,
+                radius=self.dot_radius,
+                color=self.animation_dot_color,
+            )
             per_node_succession = Succession(
                 Create(new_dot),
                 new_dot.animate.move_to(node.get_center()),
             )
             image_animations.append(per_node_succession)
             dots.append(new_dot)
-
+
         animations.append(AnimationGroup(*image_animations))
-
+
         animation_group = AnimationGroup(*animations)
-
+
         return animation_group
 
     @override_animation(Create)
     def _create_override(self):
-        return AnimationGroup()
\ No newline at end of file
+        return AnimationGroup()
diff --git a/manim_ml/neural_network/layers/parent_layers.py b/manim_ml/neural_network/layers/parent_layers.py
index 23929a3..4d0ebc8 100644
--- a/manim_ml/neural_network/layers/parent_layers.py
+++ b/manim_ml/neural_network/layers/parent_layers.py
@@ -1,16 +1,14 @@
 from manim import *
 from abc import ABC, abstractmethod
 
+
 class NeuralNetworkLayer(ABC, Group):
     """Abstract Neural Network Layer class"""
 
     def __init__(self, text=None, *args, **kwargs):
         super(Group, self).__init__()
         self.title_text = kwargs["title"] if "title" in kwargs else " "
-        self.title = Text(
-            self.title_text,
-            font_size=DEFAULT_FONT_SIZE/3
-        ).scale(0.6)
+        self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE / 3).scale(0.6)
         self.title.next_to(self, UP, 1.2)
         # self.add(self.title)
 
@@ -25,28 +23,31 @@ def _create_override(self):
     def __repr__(self):
         return f"{type(self).__name__}"
 
-class VGroupNeuralNetworkLayer(NeuralNetworkLayer):
 
+class VGroupNeuralNetworkLayer(NeuralNetworkLayer):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         # self.camera = camera
 
     @abstractmethod
     def make_forward_pass_animation(self, **kwargs):
-        pass
+        pass
 
     @override_animation(Create)
     def _create_override(self):
         return super()._create_override()
 
+
 class ThreeDLayer(ABC):
     """Abstract class for 3D layers"""
+
     # Angle of ThreeD layers is static context
-    three_d_x_rotation = 90 * DEGREES #-90 * DEGREES
-    three_d_y_rotation = 0 * DEGREES # -10 * DEGREES
+    three_d_x_rotation = 90 * DEGREES  # -90 * DEGREES
+    three_d_y_rotation = 0 * DEGREES  # -10 * DEGREES
     rotation_angle = 60 * DEGREES
     rotation_axis = [0.0, 0.9, 0.0]
 
+
 class ConnectiveLayer(VGroupNeuralNetworkLayer):
     """Forward pass animation for a given pair of layers"""
 
@@ -67,6 +68,7 @@ def make_forward_pass_animation(self, run_time=2.0, layer_args={}, **kwargs):
     def _create_override(self):
         return super()._create_override()
 
+
 class BlankConnective(ConnectiveLayer):
     """Connective layer to be used when the given pair of layers is undefined"""
 
@@ -78,4 +80,4 @@ def make_forward_pass_animation(self, run_time=1.5, layer_args={}, **kwargs):
 
     @override_animation(Create)
     def _create_override(self):
-        return super()._create_override()
\ No newline at end of file
+        return super()._create_override()
diff --git a/manim_ml/neural_network/layers/triplet.py b/manim_ml/neural_network/layers/triplet.py
index d4e2a78..9ff8324 100644
--- a/manim_ml/neural_network/layers/triplet.py
+++ b/manim_ml/neural_network/layers/triplet.py
@@ -3,11 +3,20 @@
 from manim_ml.image import GrayscaleImageMobject, LabeledColorImage
 import numpy as np
 
+
 class TripletLayer(NeuralNetworkLayer):
     """Shows triplet images"""
 
-    def __init__(self, anchor, positive, negative, stroke_width=5,
-                 font_size=22, buff=0.2, **kwargs):
+    def __init__(
+        self,
+        anchor,
+        positive,
+        negative,
+        stroke_width=5,
+        font_size=22,
+        buff=0.2,
+        **kwargs
+    ):
         super().__init__(**kwargs)
         self.anchor = anchor
         self.positive = positive
@@ -19,10 +28,17 @@ def __init__(self, anchor, positive, negative, stroke_width=5,
         # Make the assets
         self.assets = self.make_assets()
         self.add(self.assets)
-
+
     @classmethod
-    def from_paths(cls, anchor_path, positive_path, negative_path, grayscale=True,
-                   font_size=22, buff=0.2):
+    def from_paths(
+        cls,
+        anchor_path,
+        positive_path,
+        negative_path,
+        grayscale=True,
+        font_size=22,
+        buff=0.2,
+    ):
         """Creates a triplet using the anchor paths"""
         # Load images from path
         if grayscale:
@@ -40,34 +56,34 @@ def from_paths(cls, anchor_path, positive_path, negative_path, grayscale=True,
 
     def make_assets(self):
         """
-        Constructs the assets needed for a triplet layer
+        Constructs the assets needed for a triplet layer
         """
         # Handle anchor
         anchor_group = LabeledColorImage(
-            self.anchor,
+            self.anchor,
             color=WHITE,
-            label="Anchor",
+            label="Anchor",
             stroke_width=self.stroke_width,
             font_size=self.font_size,
-            buff=self.buff
+            buff=self.buff,
         )
         # Handle positive
         positive_group = LabeledColorImage(
-            self.positive,
+            self.positive,
             color=GREEN,
-            label="Positive",
+            label="Positive",
             stroke_width=self.stroke_width,
             font_size=self.font_size,
-            buff=self.buff
+            buff=self.buff,
        )
         # Handle negative
         negative_group = LabeledColorImage(
-            self.negative,
+            self.negative,
            color=RED,
-            label="Negative",
+            label="Negative",
             stroke_width=self.stroke_width,
             font_size=self.font_size,
-            buff=self.buff
+            buff=self.buff,
         )
         # Distribute the groups uniformly vertically
         assets = Group(anchor_group, positive_group, negative_group)
diff --git a/manim_ml/neural_network/layers/triplet_to_feed_forward.py b/manim_ml/neural_network/layers/triplet_to_feed_forward.py
index 22939d8..77ed4ce 100644
--- a/manim_ml/neural_network/layers/triplet_to_feed_forward.py
+++ b/manim_ml/neural_network/layers/triplet_to_feed_forward.py
@@ -3,15 +3,28 @@
 from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer
 from manim_ml.neural_network.layers.triplet import TripletLayer
 
+
 class TripletToFeedForward(ConnectiveLayer):
     """TripletLayer to FeedForward layer"""
+
     input_class = TripletLayer
     output_class = FeedForwardLayer
 
-    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
-                 dot_radius=0.02, **kwargs):
-        super().__init__(input_layer, output_layer, input_class=TripletLayer, output_class=FeedForwardLayer,
-                         **kwargs)
+    def __init__(
+        self,
+        input_layer,
+        output_layer,
+        animation_dot_color=RED,
+        dot_radius=0.02,
+        **kwargs
+    ):
+        super().__init__(
+            input_layer,
+            output_layer,
+            input_class=TripletLayer,
+            output_class=FeedForwardLayer,
+            **kwargs
+        )
         self.animation_dot_color = animation_dot_color
         self.dot_radius = dot_radius
 
@@ -22,27 +35,35 @@ def make_forward_pass_animation(self, layer_args={}, **kwargs):
         """Makes dots diverge from the given location and move to the feed forward nodes decoder"""
         animations = []
         # Loop through each image
-        images = [self.triplet_layer.anchor, self.triplet_layer.positive, self.triplet_layer.negative]
+        images = [
+            self.triplet_layer.anchor,
+            self.triplet_layer.positive,
+            self.triplet_layer.negative,
+        ]
         for image_mobject in images:
             image_animations = []
             dots = []
             # Move dots from each image to the centers of each of the nodes in the FeedForwardLayer
-            image_location = image_mobject.get_center()
+            image_location = image_mobject.get_center()
             for node in self.feed_forward_layer.node_group:
-                new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
+                new_dot = Dot(
+                    image_location,
+                    radius=self.dot_radius,
+                    color=self.animation_dot_color,
+                )
                 per_node_succession = Succession(
                     Create(new_dot),
                     new_dot.animate.move_to(node.get_center()),
                 )
                 image_animations.append(per_node_succession)
                 dots.append(new_dot)
-
+
             animations.append(AnimationGroup(*image_animations))
-
+
         animation_group = AnimationGroup(*animations)
-
+
         return animation_group
 
     @override_animation(Create)
     def _create_override(self):
-        return AnimationGroup()
\ No newline at end of file
+        return AnimationGroup()
diff --git a/manim_ml/neural_network/layers/util.py b/manim_ml/neural_network/layers/util.py
index be6d0be..fd897aa 100644
--- a/manim_ml/neural_network/layers/util.py
+++ b/manim_ml/neural_network/layers/util.py
@@ -4,22 +4,26 @@
 from manim_ml.neural_network.layers.parent_layers import BlankConnective, ThreeDLayer
 from ..layers import connective_layers_list
 
+
 def get_connective_layer(input_layer, output_layer):
     """
-    Deduces the relevant connective layer
+    Deduces the relevant connective layer
     """
     connective_layer_class = None
     for candidate_class in connective_layers_list:
         input_class = candidate_class.input_class
         output_class = candidate_class.output_class
-        if isinstance(input_layer, input_class) \
-            and isinstance(output_layer, output_class):
+        if isinstance(input_layer, input_class) and isinstance(
+            output_layer, output_class
+        ):
             connective_layer_class = candidate_class
             break
 
     if connective_layer_class is None:
         connective_layer_class = BlankConnective
-        warnings.warn(f"Unrecognized input/output class pair: {input_class} and {output_class}")
+        warnings.warn(
+            f"Unrecognized input/output class pair: {input_class} and {output_class}"
+        )
 
     # Make the instance now
     connective_layer = connective_layer_class(input_layer, output_layer)
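For orientation, `get_connective_layer` above is the dispatch point that `NeuralNetwork` relies on when wiring up a network: it scans `connective_layers_list` for the first class whose `input_class`/`output_class` pair matches the given layers, and falls back to `BlankConnective` with a warning. A minimal usage sketch (the layer classes are from this repository; the snippet itself is illustrative and not part of the patch):

    import numpy as np
    from manim_ml.neural_network.layers import FeedForwardLayer
    from manim_ml.neural_network.layers.image import ImageLayer
    from manim_ml.neural_network.layers.util import get_connective_layer

    # FeedForwardLayer -> FeedForwardLayer resolves to FeedForwardToFeedForward
    connective = get_connective_layer(FeedForwardLayer(3), FeedForwardLayer(5))
    # ImageLayer -> FeedForwardLayer resolves to ImageToFeedForward
    image_layer = ImageLayer(np.zeros((28, 28), dtype=np.uint8), height=1.5)
    connective = get_connective_layer(image_layer, FeedForwardLayer(3))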
diff --git a/manim_ml/neural_network/layers/vector.py b/manim_ml/neural_network/layers/vector.py
index c05a73f..b861ca6 100644
--- a/manim_ml/neural_network/layers/vector.py
+++ b/manim_ml/neural_network/layers/vector.py
@@ -3,11 +3,11 @@
 
 from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer
 
+
 class VectorLayer(VGroupNeuralNetworkLayer):
     """Shows a vector"""
 
-    def __init__(self, num_values, value_func=lambda: random.uniform(0, 1),
-                 **kwargs):
+    def __init__(self, num_values, value_func=lambda: random.uniform(0, 1), **kwargs):
         super().__init__(**kwargs)
         self.num_values = num_values
         self.value_func = value_func
@@ -30,8 +30,8 @@ def make_vector(self):
 
     def make_forward_pass_animation(self, layer_args={}, **kwargs):
         return AnimationGroup()
-
+
     @override_animation(Create)
     def _create_override(self):
         """Create animation"""
-        return Write(self.vector_label)
\ No newline at end of file
+        return Write(self.vector_label)
diff --git a/manim_ml/neural_network/neural_network.py b/manim_ml/neural_network/neural_network.py
index b49773b..86a39e0 100644
--- a/manim_ml/neural_network/neural_network.py
+++ b/manim_ml/neural_network/neural_network.py
@@ -17,15 +17,27 @@
 from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, ThreeDLayer
 from manim_ml.neural_network.layers.util import get_connective_layer
 from manim_ml.list_group import ListGroup
-from manim_ml.neural_network.neural_network_transformations import InsertLayer, RemoveLayer
+from manim_ml.neural_network.neural_network_transformations import (
+    InsertLayer,
+    RemoveLayer,
+)
+
 
 class NeuralNetwork(Group):
     """Neural Network Visualization Container Class"""
 
-    def __init__(self, input_layers, edge_color=WHITE, layer_spacing=0.2,
-                 animation_dot_color=RED, edge_width=2.5, dot_radius=0.03,
-                 title=" ", three_d_phi=-70 * DEGREES,
-                 three_d_theta=-80 * DEGREES):
+    def __init__(
+        self,
+        input_layers,
+        edge_color=WHITE,
+        layer_spacing=0.2,
+        animation_dot_color=RED,
+        edge_width=2.5,
+        dot_radius=0.03,
+        title=" ",
+        three_d_phi=-70 * DEGREES,
+        three_d_theta=-80 * DEGREES,
+    ):
         super(Group, self).__init__()
         self.input_layers = ListGroup(*input_layers)
         self.edge_width = edge_width
@@ -44,7 +56,7 @@ def __init__(self, input_layers, edge_color=WHITE, layer_spacing=0.2,
             self.camera.add_fixed_orientation_mobjects(layer)
             self.camera.add_fixed_in_frame_mobjects(layer)
         """
-        # TODO take layer_node_count [0, (1, 2), 0]
+        # TODO take layer_node_count [0, (1, 2), 0]
         # and make it have explicit distinct subspaces
         # Add camera to input layers
         """
@@ -56,10 +68,7 @@ def __init__(self, input_layers, edge_color=WHITE, layer_spacing=0.2,
         self._place_layers()
         self.connective_layers, self.all_layers = self._construct_connective_layers()
         # Make overhead title
-        self.title = Text(
-            self.title_text,
-            font_size=DEFAULT_FONT_SIZE/2
-        )
+        self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE / 2)
         self.title.next_to(self, UP, 1.0)
         self.add(self.title)
         # Place layers at correct z index
@@ -86,11 +95,29 @@ def _place_layers(self):
             current_layer = self.input_layers[layer_index]
             current_layer.move_to(previous_layer)
             # TODO Temp fix
-            if isinstance(current_layer, EmbeddingLayer) \
-                or isinstance(previous_layer, EmbeddingLayer):
-                shift_vector = np.array([(previous_layer.get_width()/2 + current_layer.get_width()/2 - 0.2), 0, 0])
+            if isinstance(current_layer, EmbeddingLayer) or isinstance(
+                previous_layer, EmbeddingLayer
+            ):
+                shift_vector = np.array(
+                    [
+                        (
+                            previous_layer.get_width() / 2
+                            + current_layer.get_width() / 2
+                            - 0.2
+                        ),
+                        0,
+                        0,
+                    ]
+                )
             else:
-                shift_vector = np.array([(previous_layer.get_width()/2 + current_layer.get_width()/2) + self.layer_spacing, 0, 0])
+                shift_vector = np.array(
+                    [
+                        (previous_layer.get_width() / 2 + current_layer.get_width() / 2)
+                        + self.layer_spacing,
+                        0,
+                        0,
+                    ]
+                )
             current_layer.shift(shift_vector)
 
     def _construct_connective_layers(self):
@@ -149,24 +176,25 @@ def replace_layer(self, old_layer, new_layer):
         insert_animation = self.insert_layer(layer, insert_index)
         # Make the animation
         animation_group = AnimationGroup(
-            FadeOut(self.all_layers[insert_index]),
-            FadeIn(layer),
-            lag_ratio=1.0
+            FadeOut(self.all_layers[insert_index]), FadeIn(layer), lag_ratio=1.0
         )
 
         return animation_group
 
-    def make_forward_pass_animation(self, run_time=None, passing_flash=True, layer_args={},
-                                    **kwargs):
+    def make_forward_pass_animation(
+        self, run_time=None, passing_flash=True, layer_args={}, **kwargs
+    ):
         """Generates an animation for feed forward propagation"""
         all_animations = []
-        per_layer_runtime = run_time / len(self.all_layers) if not run_time is None else None
+        per_layer_runtime = (
+            run_time / len(self.all_layers) if run_time is not None else None
+        )
         for layer_index, layer in enumerate(self.all_layers):
             # Get the layer args
             if isinstance(layer, ConnectiveLayer):
                 """
-                NOTE: By default a connective layer will get the combined
-                layer_args of the layers it is connecting.
+                NOTE: By default a connective layer will get the combined
+                layer_args of the layers it is connecting.
                 """
                 before_layer_args = {}
                 after_layer_args = {}
@@ -182,16 +210,11 @@ def make_forward_pass_animation(self, run_time=None, passing_flash=True, layer_a
                     current_layer_args = layer_args[layer]
             # Perform the forward pass of the current layer
             layer_forward_pass = layer.make_forward_pass_animation(
-                layer_args=current_layer_args,
-                run_time=per_layer_runtime,
-                **kwargs
+                layer_args=current_layer_args, run_time=per_layer_runtime, **kwargs
             )
             all_animations.append(layer_forward_pass)
         # Make the animation group
-        animation_group = Succession(
-            *all_animations,
-            lag_ratio=1.0
-        )
+        animation_group = Succession(*all_animations, lag_ratio=1.0)
 
         return animation_group
 
@@ -212,14 +235,11 @@ def _create_override(self, **kwargs):
             # Make titles
             create_title = Create(layer.title)
             # Create layer animation group
-            animation_group = AnimationGroup(
-                layer_animation,
-                create_title
-            )
+            animation_group = AnimationGroup(layer_animation, create_title)
             animations.append(animation_group)
 
         animation_group = AnimationGroup(*animations, lag_ratio=1.0)
-
+
         return animation_group
 
     def set_z_index(self, z_index_value: float, family=False):
@@ -240,7 +260,7 @@ def __repr__(self, metadata=["z_index", "title_text"]):
         inner_string = ""
         for layer in self.all_layers:
             inner_string += f"{repr(layer)} ("
-            for key in metadata:
+            for key in metadata:
                 value = getattr(layer, key)
                 if value != "":
                     inner_string += f"{key}={value}, "
@@ -250,15 +270,17 @@ def __repr__(self, metadata=["z_index", "title_text"]):
         string_repr = "NeuralNetwork([\n" + inner_string + "])"
         return string_repr
 
+
 class FeedForwardNeuralNetwork(NeuralNetwork):
     """NeuralNetwork with just feed forward layers"""
 
-    def __init__(self, layer_node_count, node_radius=0.08,
-                 node_color=BLUE, **kwargs):
+    def __init__(self, layer_node_count, node_radius=0.08, node_color=BLUE, **kwargs):
         # construct layers
         layers = []
         for num_nodes in layer_node_count:
-            layer = FeedForwardLayer(num_nodes, node_color=node_color, node_radius=node_radius)
+            layer = FeedForwardLayer(
+                num_nodes, node_color=node_color, node_radius=node_radius
+            )
             layers.append(layer)
         # call super class
-        super().__init__(layers, **kwargs)
\ No newline at end of file
+        super().__init__(layers, **kwargs)
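One behavior worth noting in `make_forward_pass_animation` above: `layer_args` is keyed by the layer instances themselves, `run_time` is split evenly into per-layer run times, and (per the NOTE in the code) a `ConnectiveLayer` receives the merged arguments of the two layers it joins. A hedged sketch of the calling convention, inside a `Scene.construct` (the `"example_arg"` key is hypothetical; which keys a layer honors depends on its own `make_forward_pass_animation`):

    nn = NeuralNetwork([FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3)])
    first_layer = nn.input_layers[0]
    forward_pass = nn.make_forward_pass_animation(
        run_time=5,  # divided evenly across all layers
        layer_args={first_layer: {"example_arg": 1.0}},  # hypothetical key
    )
    self.play(forward_pass)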
diff --git a/manim_ml/neural_network/neural_network_transformations.py b/manim_ml/neural_network/neural_network_transformations.py
index 3579a0d..28423c4 100644
--- a/manim_ml/neural_network/neural_network_transformations.py
+++ b/manim_ml/neural_network/neural_network_transformations.py
@@ -4,17 +4,18 @@
 from manim import *
 from manim_ml.neural_network.layers.util import get_connective_layer
 
+
 class RemoveLayer(AnimationGroup):
     """
-    Animation for removing a layer from a neural network.
+    Animation for removing a layer from a neural network.
 
-    Note: I needed to do something strange for creating the new connective layer.
-    The issue with creating it initially is that the positions of the sides of the
-    connective layer depend upon the location of the moved layers **after** the
-    move animations are performed. However, all of these animations are performed
-    after the animations have been created. This means that the animation depends upon
-    the state of the neural network layers after previous animations have been run.
-    To fix this issue I needed to use an UpdateFromFunc.
+    Note: I needed to do something strange for creating the new connective layer.
+    The issue with creating it initially is that the positions of the sides of the
+    connective layer depend upon the location of the moved layers **after** the
+    move animations are performed. However, all of these animations are performed
+    after the animations have been created. This means that the animation depends upon
+    the state of the neural network layers after previous animations have been run.
+    To fix this issue I needed to use an UpdateFromFunc.
     """
""" def __init__(self, layer, neural_network, layer_spacing=0.2): @@ -32,11 +33,7 @@ def __init__(self, layer, neural_network, layer_spacing=0.2): move_animations = self.make_move_animation() new_connective_animation = self.make_new_connective_animation() # Add all of the animations to the group - animations_list = [ - remove_animations, - move_animations, - new_connective_animation - ] + animations_list = [remove_animations, move_animations, new_connective_animation] super().__init__(*animations_list, lag_ratio=1.0) @@ -54,10 +51,10 @@ def get_connective_layers(self): if layer_index - 2 >= 0: before_layer = self.neural_network.all_layers[layer_index - 2] before_connective = self.neural_network.all_layers[layer_index - 1] - if layer_index + 2 < len(self.neural_network.all_layers): + if layer_index + 2 < len(self.neural_network.all_layers): after_layer = self.neural_network.all_layers[layer_index + 2] after_connective = self.neural_network.all_layers[layer_index + 1] - + return before_layer, after_layer, before_connective, after_connective def make_remove_animation(self): @@ -66,8 +63,7 @@ def make_remove_animation(self): remove_connective_animation = self.make_remove_connective_layers_animation() # Remove animations remove_animations = AnimationGroup( - remove_layer_animation, - remove_connective_animation + remove_layer_animation, remove_connective_animation ) return remove_animations @@ -93,8 +89,7 @@ def make_remove_connective_layers_animation(self): fade_out_after_connective = FadeOut(self.after_connective) # Group items remove_connective_group = AnimationGroup( - fade_out_after_connective, - fade_out_before_connective + fade_out_after_connective, fade_out_before_connective ) return remove_connective_group @@ -106,44 +101,56 @@ def make_move_animation(self): shift_right_amount = None if not self.before_layer is None: # Compute shift amount - layer_dist = np.abs(self.layer.get_center() - self.before_layer.get_right())[0] - shift_right_amount = np.array([layer_dist - self.layer_spacing/2, 0, 0]) + layer_dist = np.abs( + self.layer.get_center() - self.before_layer.get_right() + )[0] + shift_right_amount = np.array([layer_dist - self.layer_spacing / 2, 0, 0]) # Shift all layers before forward - before_layer_index = self.neural_network.all_layers.index_of(self.before_layer) - layers_before = Group(*self.neural_network.all_layers[:before_layer_index + 1]) + before_layer_index = self.neural_network.all_layers.index_of( + self.before_layer + ) + layers_before = Group( + *self.neural_network.all_layers[: before_layer_index + 1] + ) move_before_layers = layers_before.animate.shift(shift_right_amount) move_after_layers = AnimationGroup() shift_left_amount = None if not self.after_layer is None: - layer_dist = np.abs(self.after_layer.get_left() - self.layer.get_center())[0] + layer_dist = np.abs(self.after_layer.get_left() - self.layer.get_center())[ + 0 + ] shift_left_amount = np.array([-layer_dist + self.layer_spacing / 2, 0, 0]) # Shift all layers after backward - after_layer_index = self.neural_network.all_layers.index_of(self.after_layer) + after_layer_index = self.neural_network.all_layers.index_of( + self.after_layer + ) layers_after = Group(*self.neural_network.all_layers[after_layer_index:]) move_after_layers = layers_after.animate.shift(shift_left_amount) # Group the move animations - move_group = AnimationGroup( - move_before_layers, - move_after_layers - ) + move_group = AnimationGroup(move_before_layers, move_after_layers) return move_group def make_new_connective_animation(self): 
"""Makes new connective layer""" self.anim_count = 0 + def create_new_connective(neural_network): """ - Creates new connective layer - - This is a closure that creates a new connective layer and animates it. + Creates new connective layer + + This is a closure that creates a new connective layer and animates it. """ self.anim_count += 1 if self.anim_count == 1: if not self.before_layer is None and not self.after_layer is None: print(neural_network) - new_connective_class = get_connective_layer(self.before_layer, self.after_layer) - before_layer_index = neural_network.all_layers.index_of(self.before_layer) + 1 + new_connective_class = get_connective_layer( + self.before_layer, self.after_layer + ) + before_layer_index = ( + neural_network.all_layers.index_of(self.before_layer) + 1 + ) neural_network.all_layers.insert(before_layer_index, new_connective) print(neural_network) @@ -151,6 +158,7 @@ def create_new_connective(neural_network): return update_func_anim + class InsertLayer(AnimationGroup): """Animation for inserting layer at given index""" @@ -161,8 +169,8 @@ def __init__(self, layer, index, neural_network): # Check valid index assert index < len(self.neural_network.all_layers) # Layers before and after - self.layers_before = self.neural_network.all_layers[:self.index] - self.layers_after = self.neural_network.all_layers[self.index:] + self.layers_before = self.neural_network.all_layers[: self.index] + self.layers_after = self.neural_network.all_layers[self.index :] # Get the non-connective layer before and after if len(self.layers_before) > 0: self.layer_before = self.layers_before[-2] @@ -172,15 +180,18 @@ def __init__(self, layer, index, neural_network): if not self.layer_after is None: self.layer.move_to(self.layer_after) # Make animations - self.old_connective_layer, remove_connective_layer = self.remove_connective_layer_animation() + ( + self.old_connective_layer, + remove_connective_layer, + ) = self.remove_connective_layer_animation() move_layers = self.make_move_layers_animation() create_layer = self.make_create_layer_animation() # create_connective_layers = self.make_create_connective_layers() animations = [ remove_connective_layer, move_layers, - create_layer, - # create_connective_layers + create_layer, + # create_connective_layers ] super().__init__(*animations, lag_ratio=1.0) @@ -219,27 +230,39 @@ def remove_connective_layer_animation(self): def make_move_layers_animation(self): """Shifts layers before and after""" - before_connective_width, after_connective_width = self.get_connective_layer_widths() + ( + before_connective_width, + after_connective_width, + ) = self.get_connective_layer_widths() old_connective_width = 0 if not self.old_connective_layer is None: old_connective_width = self.old_connective_layer.width # Before layer shift before_shift_animation = AnimationGroup() if len(self.layers_before) > 0: - before_shift = np.array([-self.layer.width/2 - before_connective_width + old_connective_width, 0, 0]) + before_shift = np.array( + [ + -self.layer.width / 2 + - before_connective_width + + old_connective_width, + 0, + 0, + ] + ) # Shift layers before - before_shift_animation = Group(*self.layers_before).animate.shift(before_shift) + before_shift_animation = Group(*self.layers_before).animate.shift( + before_shift + ) # After layer shift after_shift_animation = AnimationGroup() if len(self.layers_after) > 0: - after_shift = np.array([self.layer.width/2 + after_connective_width, 0, 0]) + after_shift = np.array( + [self.layer.width / 2 + after_connective_width, 0, 0] 
diff --git a/manim_ml/neural_network/variational_autoencoder.py b/manim_ml/neural_network/variational_autoencoder.py
index a4bb640..860e152 100644
--- a/manim_ml/neural_network/variational_autoencoder.py
+++ b/manim_ml/neural_network/variational_autoencoder.py
@@ -10,12 +10,19 @@
 from manim_ml.neural_network.layers import FeedForwardLayer, EmbeddingLayer, ImageLayer
 from manim_ml.neural_network.neural_network import NeuralNetwork
 
+
 class VariationalAutoencoder(VGroup):
     """Variational Autoencoder Manim Visualization"""
-
-    def __init__(self, encoder_nodes_per_layer=[5, 3], decoder_nodes_per_layer=[3, 5],
-                 point_color=BLUE, dot_radius=0.05, ellipse_stroke_width=1.0,
-                 layer_spacing=0.5):
+
+    def __init__(
+        self,
+        encoder_nodes_per_layer=[5, 3],
+        decoder_nodes_per_layer=[3, 5],
+        point_color=BLUE,
+        dot_radius=0.05,
+        ellipse_stroke_width=1.0,
+        layer_spacing=0.5,
+    ):
         super(VGroup, self).__init__()
         self.encoder_nodes_per_layer = encoder_nodes_per_layer
         self.decoder_nodes_per_layer = decoder_nodes_per_layer
@@ -30,13 +37,15 @@ def _construct_neural_network(self):
         """Makes the VAE encoder, embedding layer, and decoder"""
         embedding_layer = EmbeddingLayer()
 
-        neural_network = NeuralNetwork([
-            FeedForwardLayer(5),
-            FeedForwardLayer(3),
-            embedding_layer,
-            FeedForwardLayer(3),
-            FeedForwardLayer(5)
-        ])
+        neural_network = NeuralNetwork(
+            [
+                FeedForwardLayer(5),
+                FeedForwardLayer(3),
+                embedding_layer,
+                FeedForwardLayer(3),
+                FeedForwardLayer(5),
+            ]
+        )
 
         return neural_network, embedding_layer
 
@@ -46,20 +55,18 @@ def _create_vae(self):
 
     def make_triplet_forward_pass(self, triplet):
         pass
-
+
     def make_image_forward_pass(self, input_image, output_image, run_time=1.5):
         """Override forward pass animation specific to a VAE"""
         # Make a wrapper NN with images
-        wrapper_neural_network = NeuralNetwork([
-            ImageLayer(input_image),
-            self.neural_network,
-            ImageLayer(output_image)
-        ])
-        # Make animation
+        wrapper_neural_network = NeuralNetwork(
+            [ImageLayer(input_image), self.neural_network, ImageLayer(output_image)]
+        )
+        # Make animation
         animation_group = AnimationGroup(
             Create(wrapper_neural_network),
             wrapper_neural_network.make_forward_pass_animation(),
-            lag_ratio=1.0
+            lag_ratio=1.0,
         )
 
-        return animation_group
\ No newline at end of file
+        return animation_group
diff --git a/manim_ml/one_to_one_sync.py b/manim_ml/one_to_one_sync.py
index d257006..4da14b4 100644
--- a/manim_ml/one_to_one_sync.py
+++ b/manim_ml/one_to_one_sync.py
@@ -6,5 +6,6 @@
     views of the same concept and visualize them at the same time.
 """
 
-class OneToOneSync():
-    pass
\ No newline at end of file
+
+class OneToOneSync:
+    pass
diff --git a/manim_ml/probability.py b/manim_ml/probability.py
index 542e1fa..c4caa05 100644
--- a/manim_ml/probability.py
+++ b/manim_ml/probability.py
@@ -2,10 +2,13 @@
 import numpy as np
 import math
 
+
 class GaussianDistribution(VGroup):
     """Object for drawing a Gaussian distribution"""
 
-    def __init__(self, axes, mean=None, cov=None, dist_theme="gaussian", color=ORANGE, **kwargs):
+    def __init__(
+        self, axes, mean=None, cov=None, dist_theme="gaussian", color=ORANGE, **kwargs
+    ):
         super(VGroup, self).__init__(**kwargs)
         self.axes = axes
         self.mean = mean
@@ -18,10 +21,14 @@ def __init__(self, axes, mean=None, cov=None, dist_theme="gaussian", color=ORANG
             self.cov = np.array([[1, 0], [0, 1]])
         # Make the Gaussian
         if self.dist_theme == "gaussian":
-            self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov, color=self.color)
+            self.ellipses = self.construct_gaussian_distribution(
+                self.mean, self.cov, color=self.color
+            )
             self.add(self.ellipses)
         elif self.dist_theme == "ellipse":
-            self.ellipses = self.construct_simple_gaussian_ellipse(self.mean, self.cov, color=self.color)
+            self.ellipses = self.construct_simple_gaussian_ellipse(
+                self.mean, self.cov, color=self.color
+            )
             self.add(self.ellipses)
         else:
             raise Exception(f"Unrecognized distribution theme: {self.dist_theme}")
@@ -33,11 +40,10 @@ def _create_gaussian_distribution(self):
         """
 
     def compute_covariance_rotation_and_scale(self, covariance):
-
         def eigsorted(cov):
-            '''
+            """
             Eigenvalues and eigenvectors of the covariance matrix.
-            '''
+            """
             vals, vecs = np.linalg.eigh(cov)
             order = vals.argsort()[::-1]
             return vals[order], vecs[:, order]
@@ -56,13 +62,16 @@ def cov_ellipse(cov, nstd):
             return width, height, theta
 
         width, height, angle = cov_ellipse(covariance, 1)
-        scale_factor = np.abs(self.axes.x_range[0] - self.axes.x_range[1]) / self.axes.x_length
+        scale_factor = (
+            np.abs(self.axes.x_range[0] - self.axes.x_range[1]) / self.axes.x_length
+        )
         width /= scale_factor
         height /= scale_factor
         return angle, width, height
 
-    def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
-                                        num_ellipses=4):
+    def construct_gaussian_distribution(
+        self, mean, covariance, color=ORANGE, num_ellipses=4
+    ):
         """Returns a 2d Gaussian distribution object with given mean and covariance"""
         # map mean and covariance to frame coordinates
         mean = self.axes.coords_to_point(*mean)
@@ -76,11 +85,11 @@ def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
             ellipse_width = width * (1 - opacity)
             ellipse_height = height * (1 - opacity)
             ellipse = Ellipse(
-                width=ellipse_width,
-                height=ellipse_height,
-                color=color,
-                fill_opacity=opacity,
-                stroke_width=0.0
+                width=ellipse_width,
+                height=ellipse_height,
+                color=color,
+                fill_opacity=opacity,
+                stroke_width=0.0,
             )
             ellipse.move_to(mean)
             ellipse.rotate(rotation)
@@ -97,15 +106,15 @@ def construct_simple_gaussian_ellipse(self, mean, covariance, color=ORANGE):
         ellipses = VGroup()
         opacity = 0.4
         ellipse = Ellipse(
-            width=width,
-            height=height,
-            color=color,
-            fill_opacity=opacity,
-            stroke_width=1.0
+            width=width,
+            height=height,
+            color=color,
+            fill_opacity=opacity,
+            stroke_width=1.0,
         )
         ellipse.move_to(mean)
         ellipse.rotate(angle)
         ellipses.add(ellipse)
         ellipses.set_z_index(3)
-        return ellipses
\ No newline at end of file
+        return ellipses
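For reference, the `cov_ellipse` helper above (its body is elided from the hunk) follows the standard covariance-ellipse construction: given the eigendecomposition of the 2x2 covariance with eigenvalues lambda1 >= lambda2 and principal eigenvector v1, the n-standard-deviation ellipse has width 2n*sqrt(lambda1), height 2n*sqrt(lambda2), and rotation theta = atan2(v1_y, v1_x). A quick worked check of those formulas (computed by hand, not output captured from this code):

    import numpy as np

    cov = np.array([[3.0, 1.0], [1.0, 3.0]])
    vals, vecs = np.linalg.eigh(cov)       # ascending: [2.0, 4.0]
    order = vals.argsort()[::-1]           # eigsorted() sorts descending
    vals, vecs = vals[order], vecs[:, order]
    # Principal axis points along (1, 1)/sqrt(2): 45 degrees (up to eigenvector sign)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * 1 * np.sqrt(vals)  # nstd=1 -> 4.0 and ~2.83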
diff --git a/manim_ml/scene.py b/manim_ml/scene.py
index df5654c..7f682e6 100644
--- a/manim_ml/scene.py
+++ b/manim_ml/scene.py
@@ -1,17 +1,17 @@
 from manim import *
 
+
 class ManimML3DScene(ThreeDScene):
     """
-    This is a wrapper class for the Manim ThreeDScene
+    This is a wrapper class for the Manim ThreeDScene
 
-    Note: the primary purpose of this is to make it so
-    that everything inside of a layer
+    Note: the primary purpose of this is to make it so
+    that everything inside of a layer
     """
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
     def play(self):
-        """
-        """
-        pass
\ No newline at end of file
+        """ """
+        pass
diff --git a/setup.py b/setup.py
index fcff417..ebe17c0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,8 @@
 from setuptools import setup, find_packages
 
 setup(
-    name = "manim_ml",
-    version = "0.0.12",
-    description = (" Machine Learning Animations in python using Manim."),
+    name="manim_ml",
+    version="0.0.12",
+    description=("Machine Learning Animations in Python using Manim."),
     packages=find_packages(),
-)
\ No newline at end of file
+)
diff --git a/tests/.DS_Store b/tests/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..7b4368e6066c14f65878ea79bc9119f0bc528a2d
Binary files /dev/null and b/tests/.DS_Store differ