#ifndef PROG3_NN_FINAL_PROJECT_V2025_01_LOSS_H
#define PROG3_NN_FINAL_PROJECT_V2025_01_LOSS_H
template <typename Prediction, typename Expected>
MSELoss(Prediction&& y_prediction, Expected&& y_true)
    : y_prediction(std::forward<Prediction>(y_prediction)),
      y_true(std::forward<Expected>(y_true)) {
  if (y_prediction.shape() != y_true.shape()) {
    throw std::invalid_argument(
        "algebra::Tensors have incompatible shapes");
auto loss() const -> T override {
  const size_t num_elements = y_true.size();
  for (size_t i = 0; i < num_elements; ++i) {
    T difference = y_prediction[i] - y_true[i];
    sum += difference * difference;
  }
  return sum / static_cast<T>(num_elements);
return T(2) / y_true.size() * (y_prediction - y_true);
template <typename Prediction, typename Expected>
BCELoss(Prediction&& y_prediction, Expected&& y_true)
    : y_prediction(std::forward<Prediction>(y_prediction)),
      y_true(std::forward<Expected>(y_true)) {
  if (y_prediction.shape() != y_true.shape()) {
    throw std::invalid_argument(
        "algebra::Tensors have incompatible shapes");
auto loss() const -> T override {
  const size_t num_elements = y_true.size();
  for (size_t i = 0; i < num_elements; ++i) {
    sum += y_true[i] * std::log(y_prediction[i]) +
           (1 - y_true[i]) * std::log(1 - y_prediction[i]);
  }
  return -sum / static_cast<T>(num_elements);
return -((y_true / y_prediction) -
         ((static_cast<T>(1) - y_true) /
          (static_cast<T>(1) - y_prediction))) /
template <typename T>
template <typename Prediction, typename Expected>
    : y_prediction(std::forward<Prediction>(y_prediction)),
      y_true(std::forward<Expected>(y_true)),
  if (y_prediction.shape() != y_true.shape()) {
    throw std::invalid_argument(
        "algebra::Tensors have incompatible shapes");
auto loss() const -> T override {
  const std::size_t num_samples = y_true.shape()[0];
  const std::size_t num_classes = y_true.shape()[1];
  for (std::size_t i = 0; i < num_samples; ++i) {
    for (std::size_t j = 0; j < num_classes; ++j) {
      const T pred = std::clamp(y_prediction(i, j), epsilon, 1 - epsilon);
      sum += y_true(i, j) * std::log(pred);
    }
  }
  return -sum / num_samples;
const std::size_t num_samples = y_true.shape()[0];
return grad / num_samples;
Representa un tensor de tipo T y rango Rank.
Definition tensor.h:63
auto size() const -> size_t
Definition tensor.h:175
auto shape() const noexcept -> const std::array< size_t, Rank > &
Definition tensor.h:179
Representa un tensor de tipo T y rango Rank.
Definition tensor.h:63
auto loss_gradient() const -> algebra::Tensor< T, 2 > override
Gradiente de la pérdida BCE con respecto a las predicciones.
Definition loss.h:114
auto loss() const -> T override
Devuelve el valor de la pérdida BCE.
Definition loss.h:99
BCELoss(Prediction &&y_prediction, Expected &&y_true)
Constructor con predicciones y etiquetas verdaderas.
Definition loss.h:86
CrossEntropyLoss(Prediction &&y_prediction, Expected &&y_true, const T epsilon=1e-7)
Constructor que recibe tensores de predicciones y etiquetas reales.
Definition loss.h:141
auto loss() const -> T override
Valor de la pérdida Cross Entropy. Aplica logaritmo y protección contra valores extremos.
Definition loss.h:156
auto loss_gradient() const -> algebra::Tensor< T, 2 > override
Gradiente de la pérdida Cross Entropy. Simplemente calcula la diferencia entre predicción y etiqueta.
Definition loss.h:177
auto loss() const -> T override
Devuelve el valor de la pérdida MSE.
Definition loss.h:47
MSELoss(Prediction &&y_prediction, Expected &&y_true)
Constructor que recibe predicciones y valores reales.
Definition loss.h:34
auto loss_gradient() const -> algebra::Tensor< T, 2 > override
Gradiente de la pérdida con respecto a las predicciones.
Definition loss.h:62
Capa de activación de Rectified Linear Unit (ReLU). Los valores negativos del input se convierten en cero.
Definition activation.h:14
Interfaz para una función de pérdida (loss). Se encarga de calcular qué tan mal lo hizo la red con respecto al resultado esperado.
Definition interfaces.h:102