Skip to content

Commit 10be78a

Browse files
Add RMSE and Log-Cosh loss functions for issue #13379
1 parent 7555469 commit 10be78a

File tree

1 file changed

+5
-6
lines changed

1 file changed

+5
-6
lines changed

machine_learning/loss_functions.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -663,17 +663,17 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
663663
return np.sum(kl_loss)
664664

665665

666-
667666
def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
668667
"""
669-
Calculate the Root Mean Squared Error (RMSE) between ground truth and predicted values.
668+
Calculate the Root Mean Squared Error (RMSE) between ground truth and
669+
predicted values.
670670
671671
RMSE is the square root of the mean squared error. It is commonly used in regression
672672
tasks to measure the magnitude of prediction errors in the same units as the target.
673673
674674
RMSE = sqrt( (1/n) * Σ(y_true - y_pred)^2 )
675675
676-
Reference: https://en.wikipedia.org/wiki/Mean_squared_error#Root-mean-square_error
676+
Reference: https://en.wikipedia.org/wiki/Root-mean-square_deviation
677677
678678
Parameters:
679679
- y_true: The true values (ground truth)
@@ -705,7 +705,6 @@ def root_mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
705705
return np.sqrt(mse)
706706

707707

708-
709708
def log_cosh_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
710709
"""
711710
Calculate the Log-Cosh Loss between ground truth and predicted values.
@@ -745,12 +744,12 @@ def log_cosh_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
745744
raise ValueError("Input arrays must have the same length.")
746745

747746
errors = y_pred - y_true
748-
# Use np.logaddexp for numerical stability: log(cosh(x)) = x + log(1 + exp(-2x)) - log(2)
747+
# Use np.logaddexp for numerical stability:
748+
# log(cosh(x)) = x + log(1 + exp(-2x)) - log(2)
749749
# But for simplicity and readability, we use np.cosh with clipping for large values
750750
# Alternatively, use stable version:
751751
loss = np.logaddexp(errors, -errors) - np.log(2)
752752
return np.mean(loss)
753-
754753
if __name__ == "__main__":
755754
import doctest
756755

0 commit comments

Comments (0)