forked from TannerGilbert/Machine-Learning-Explained

Commit 7580439 (1 parent: abe8693)

Renamed loss functions to metrics and added R2 score and Tweedie deviance

Showing 33 changed files with 1,603 additions and 13 deletions.
New file, 63 lines: a rendered copy of the metrics document in which every equation is embedded as a pre-generated SVG image (tex/*.svg). Its content is identical to the LaTeX source shown next.
@@ -0,0 +1,63 @@
# Metrics
## Classification

### Binary cross entropy

$$\text{BinaryCrossentropy}(y, \hat{y}) = -\frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples} - 1} \left[ y_i \log{\hat{y}_i} + \left(1-y_i\right) \log{(1-\hat{y}_i)} \right]$$
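The commit adds no code for this metric; purely as an illustration (class name assumed), a NumPy sketch in the style of the repo's other metric classes could be:

```python
import numpy as np


class BinaryCrossentropy:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # y_pred is assumed to hold probabilities strictly between 0 and 1
        return -np.sum(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred)) / y.shape[0]
```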
### Accuracy Score

$$\text{accuracy}(y, \hat{y}) = \frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples}-1} 1(\hat{y}_i = y_i)$$
### Hinge Loss

$$L_\text{Hinge}(y_w, y_t) = \max\left\{1 + y_t - y_w, 0\right\}$$
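No hinge-loss code appears in this diff. A minimal sketch, assuming y_w holds the decision value of the correct class and y_t the highest decision value among the incorrect classes, with the per-sample losses averaged:

```python
import numpy as np


class Hinge:
    def __call__(self, y_w: np.ndarray, y_t: np.ndarray) -> np.float64:
        return self.loss(y_w, y_t)

    def loss(self, y_w: np.ndarray, y_t: np.ndarray) -> np.float64:
        # per-sample hinge values, clipped at zero, then averaged
        return np.mean(np.maximum(1 + y_t - y_w, 0))
```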
## Regression

### Mean Squared Error

$$\text{MSE}(y, \hat{y}) = \frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples} - 1} (y_i - \hat{y}_i)^2.$$
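Not part of this diff, but for comparison with the code files below, MSE in the same class convention would be roughly:

```python
import numpy as np


class MeanSquaredError:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # average of the squared residuals
        return np.sum(np.power(y - y_pred, 2)) / y.shape[0]
```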
### Mean Squared Logarithmic Error

$$\text{MSLE}(y, \hat{y}) = \frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples} - 1} (\log_e (1 + y_i) - \log_e (1 + \hat{y}_i) )^2.$$
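A hypothetical MSLE class (not in this commit), using np.log1p for the log(1 + x) terms:

```python
import numpy as np


class MeanSquaredLogarithmicError:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # squared difference of log(1 + .) terms; inputs must be > -1
        return np.sum(np.power(np.log1p(y) - np.log1p(y_pred), 2)) / y.shape[0]
```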
### Mean Absolute Error

$$\text{MAE}(y, \hat{y}) = \frac{1}{n_{\text{samples}}} \sum_{i=0}^{n_{\text{samples}}-1} \left| y_i - \hat{y}_i \right|.$$
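An illustrative MAE class in the same pattern (not part of the diff):

```python
import numpy as np


class MeanAbsoluteError:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # average absolute residual
        return np.sum(np.absolute(y - y_pred)) / y.shape[0]
```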
### Mean Absolute Percentage Error

$$\text{MAPE}(y, \hat{y}) = \frac{1}{n_{\text{samples}}} \sum_{i=0}^{n_{\text{samples}}-1} \frac{\left| y_i - \hat{y}_i \right|}{\max(\epsilon, \left| y_i \right|)}$$
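A sketch of MAPE (not in this commit); here epsilon is assumed to be machine epsilon, mirroring the formula's guard against division by zero:

```python
import numpy as np


class MeanAbsolutePercentageError:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # epsilon keeps the denominator positive when y_i == 0
        epsilon = np.finfo(np.float64).eps
        return np.mean(np.absolute(y - y_pred) / np.maximum(epsilon, np.absolute(y)))
```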
### Median Absolute Error

$$\text{MedAE}(y, \hat{y}) = \text{median}(\mid y_1 - \hat{y}_1 \mid, \ldots, \mid y_n - \hat{y}_n \mid).$$
### Cosine Similarity

$$\text{cosine similarity}=S_{C}(A,B):=\cos(\theta )={\mathbf {A} \cdot \mathbf {B} \over \|\mathbf {A} \|\|\mathbf {B} \|}={\frac {\sum \limits _{i=1}^{n}{A_{i}B_{i}}}{{\sqrt {\sum \limits _{i=1}^{n}{A_{i}^{2}}}}{\sqrt {\sum \limits _{i=1}^{n}{B_{i}^{2}}}}}}$$
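Unlike the sample-wise metrics above, cosine similarity compares two vectors A and B. An illustrative sketch (naming assumed, not part of the diff):

```python
import numpy as np


class CosineSimilarity:
    def __call__(self, a: np.ndarray, b: np.ndarray) -> np.float64:
        return self.loss(a, b)

    def loss(self, a: np.ndarray, b: np.ndarray) -> np.float64:
        # dot product of the vectors divided by the product of their norms
        return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
```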
### R2 Score

$$R^2(y, \hat{y}) = 1 - \frac{\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}{\sum_{i=1}^{n} (y_i - \bar{y})^2}$$

where $\bar{y} = \frac{1}{n} \sum_{i=1}^{n} y_i$
### Tweedie deviance

$$\text{D}(y, \hat{y}) = \frac{1}{n_\text{samples}} \sum_{i=0}^{n_\text{samples} - 1} \begin{cases} (y_i-\hat{y}_i)^2, & \text{for }p=0\text{ (Normal)}\\ 2(y_i \log(y_i/\hat{y}_i) + \hat{y}_i - y_i), & \text{for }p=1\text{ (Poisson)}\\ 2(\log(\hat{y}_i/y_i) + y_i/\hat{y}_i - 1), & \text{for }p=2\text{ (Gamma)}\\ 2\left(\frac{\max(y_i,0)^{2-p}}{(1-p)(2-p)}-\frac{y_i\,\hat{y}^{1-p}_i}{1-p}+\frac{\hat{y}^{2-p}_i}{2-p}\right), & \text{otherwise} \end{cases}$$
### Huber Loss

$$L_{\delta}(y, \hat{y}) = \begin{cases} \frac{1}{2}(y - \hat{y})^{2} & \text{for } |y - \hat{y}| \leq \delta \\ \delta\left(|y - \hat{y}| - \frac{1}{2}\delta\right) & \text{otherwise} \end{cases}$$
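A hypothetical Huber loss class (not in this commit), taking delta as a constructor parameter in the spirit of the TweedieDeviance class below:

```python
import numpy as np


class HuberLoss:
    def __init__(self, delta: float = 1.0) -> None:
        self.delta = delta

    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        residual = np.absolute(y - y_pred)
        # quadratic for small residuals, linear beyond delta
        return np.mean(np.where(residual <= self.delta,
                                0.5 * np.power(residual, 2),
                                self.delta * (residual - 0.5 * self.delta)))
```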
### Log Cosh Loss

$$\text{log cosh}(y, \hat{y}) = \frac{1}{n_{\text{samples}}} \sum_{i=0}^{n_{\text{samples}}-1} \log{\left(\cosh{(\hat{y}_i - y_i)}\right)}$$
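An illustrative log-cosh class (not part of the diff), averaging log(cosh(residual)) over all samples:

```python
import numpy as np


class LogCoshLoss:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # log of the hyperbolic cosine of each residual, averaged
        return np.sum(np.log(np.cosh(y_pred - y))) / y.shape[0]
```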
## KL Divergence

$$D_{\text{KL}}(y\parallel \hat{y})=\sum_{i=0}^{n_{\text{samples}}-1} y_i \log{\left(\frac{y_i}{\hat{y}_i}\right)}$$
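A sketch of the divergence above (not in this commit); y and y_pred are assumed to be strictly positive probability vectors:

```python
import numpy as np


class KLDivergence:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # elementwise y * log(y / y_pred), summed over the distribution
        return np.sum(y * np.log(y / y_pred))
```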
@@ -0,0 +1,9 @@
import numpy as np


class Accuracy:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # fraction of predictions that exactly match the labels
        return np.sum(y == y_pred) / y.shape[0]
10 files renamed without changes.
@@ -0,0 +1,9 @@
import numpy as np


class MedianAbsoluteError:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # median of the absolute residuals; robust to outliers
        return np.median(np.absolute(y - y_pred))
1 change: 1 addition & 0 deletions
Loss_Functions/poisson_loss/code/poisson.py → Metrics/code/poisson.py

@@ -1,3 +1,4 @@
# based on https://keras.io/api/losses/probabilistic_losses/#poisson-class
import numpy as np
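The hunk shows only the added attribution comment; the rest of poisson.py is elided in this view. Going by the cited Keras definition, mean(y_pred - y * log(y_pred)), the class plausibly looks something like this (structure assumed):

```python
import numpy as np


class Poisson:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # Poisson loss per the Keras reference; y_pred must be positive
        return np.mean(y_pred - y * np.log(y_pred))
```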
@@ -0,0 +1,9 @@
import numpy as np


class R2Score:
    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # 1 - (residual sum of squares) / (total sum of squares)
        return 1 - np.sum(np.power(y - y_pred, 2)) / np.sum(np.power(y - np.mean(y), 2))
@@ -0,0 +1,19 @@
import numpy as np


class TweedieDeviance:
    def __init__(self, power: int) -> None:
        self.power = power

    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        # each branch matches one case of the deviance formula above
        if self.power == 0:  # Normal
            return np.sum(np.power(y - y_pred, 2)) / y.shape[0]
        elif self.power == 1:  # Poisson
            return np.sum(2 * (y * np.log(y / y_pred) + y_pred - y)) / y.shape[0]
        elif self.power == 2:  # Gamma
            return np.sum(2 * (np.log(y_pred / y) + y / y_pred - 1)) / y.shape[0]
        else:
            p = self.power
            return np.sum(2 * (np.power(np.maximum(y, 0), 2 - p) / ((1 - p) * (2 - p))
                               - y * np.power(y_pred, 1 - p) / (1 - p)
                               + np.power(y_pred, 2 - p) / (2 - p))) / y.shape[0]