diff --git a/.markdownlint.yaml b/.markdownlint.yaml
index df577d75..0bc4c5b7 100644
--- a/.markdownlint.yaml
+++ b/.markdownlint.yaml
@@ -23,3 +23,9 @@
 MD037: false
 MD049: false
 MD046: false
+
+MD052: false
+
+MD053: false
+
+MD045: false
diff --git a/dl/energy-based-models/ebm.diffusion.md b/dl/energy-based-models/ebm.diffusion.md
index 02deea35..504653f0 100644
--- a/dl/energy-based-models/ebm.diffusion.md
+++ b/dl/energy-based-models/ebm.diffusion.md
@@ -5,7 +5,7 @@
 Many deep learning models utilize the concept of latent space, e.g., $\mathbf z$ which is usually a compression of the real data space, e.g., $\mathbf x$, to enable easier computations for our task. However, such models usually require the computation of an intractable marginalization of the joint distribution $p(\mathbf x, \mathbf z)$ over the latent space[@Luo2022-hz]. To make such computations tractable, we have to apply approximations or theoretical assumptions.
 
-Diffusion models in deep learning establish the connection between the real data space $\mathbf x$ and the latent space $\mathbf z$ assuming invertible [diffusion processes](https://en.wikipedia.org/wiki/Diffusion_process) [@Sohl-Dickstein2015-th][@Ho2020-er].
+Diffusion models in deep learning establish the connection between the real data space $\mathbf x$ and the latent space $\mathbf z$ assuming invertible [diffusion processes](https://en.wikipedia.org/wiki/Diffusion_process) [@Sohl-Dickstein2015-th] [@Ho2020-er].
 
 ## Objective
diff --git a/dl/notebooks/transformer_history.py b/dl/notebooks/transformer_history.py
index 4be076ad..d8fdeba1 100644
--- a/dl/notebooks/transformer_history.py
+++ b/dl/notebooks/transformer_history.py
@@ -72,9 +72,11 @@
                         i["publicationDate"], "%Y-%m-%dT%H:%M:%SZ"
                     ).toordinal()
                 ),
-                np.log(1 + i.get("forwardEdgeCount", 0)) * (1 + rng.random() * 0.2)
-                if i.get("forwardEdgeCount", 0) > 50
-                else np.log(1 + i.get("forwardEdgeCount", 0)) + rng.random() * 3,
+                (
+                    np.log(1 + i.get("forwardEdgeCount", 0)) * (1 + rng.random() * 0.2)
+                    if i.get("forwardEdgeCount", 0) > 50
+                    else np.log(1 + i.get("forwardEdgeCount", 0)) + rng.random() * 3
+                ),
             ]
         )