Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Add ReLU, Sigmoid, and Tanh functions (TheAlgorithms#534)
  • Loading branch information
ethanwater authored Oct 3, 2023
commit d3d0a8a79818fd8cfc2ca2df3a5a23faf81bfc6b
6 changes: 6 additions & 0 deletions src/math/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,16 @@ mod prime_factors;
mod prime_numbers;
mod quadratic_residue;
mod random;
mod relu;
pub mod sieve_of_eratosthenes;
mod sigmoid;
mod signum;
mod simpson_integration;
mod sine;
mod square_pyramidal_numbers;
mod square_root;
mod sum_of_digits;
mod tanh;
mod trial_division;
mod zellers_congruence_algorithm;

Expand Down Expand Up @@ -88,12 +91,15 @@ pub use self::prime_factors::prime_factors;
pub use self::prime_numbers::prime_numbers;
pub use self::quadratic_residue::cipolla;
pub use self::random::PCG32;
pub use self::relu::relu;
pub use self::sieve_of_eratosthenes::sieve_of_eratosthenes;
pub use self::sigmoid::sigmoid;
pub use self::signum::signum;
pub use self::simpson_integration::simpson_integration;
pub use self::sine::sine;
pub use self::square_pyramidal_numbers::square_pyramidal_number;
pub use self::square_root::{fast_inv_sqrt, square_root};
pub use self::sum_of_digits::{sum_digits_iterative, sum_digits_recursive};
pub use self::tanh::tanh;
pub use self::trial_division::trial_division;
pub use self::zellers_congruence_algorithm::zellers_congruence_algorithm;
34 changes: 34 additions & 0 deletions src/math/relu.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
//Rust implementation of the ReLU (rectified linear unit) activation function.
//The formula for ReLU is quite simple really: (if x>0 -> x, else -> 0)
//More information on the concepts of ReLU can be found here:
//https://en.wikipedia.org/wiki/Rectifier_(neural_networks)

//The function below takes a mutable reference to a Vec<f32> as an argument
//and returns that same reference with ReLU applied to all values.
//Of course, these functions can be changed by the developer so that the input vector isn't manipulated.
//This is simply an implementation of the formula.

/// Applies the ReLU (rectified linear unit) activation function in place.
///
/// Every element `x` of `array` is replaced with `max(x, 0)`: positive
/// values pass through unchanged, all non-positive values are clamped
/// to `0.0`.
///
/// Returns the same mutable reference so calls can be chained.
pub fn relu(array: &mut Vec<f32>) -> &mut Vec<f32> {
    for value in array.iter_mut() {
        // Dereference and compare values directly instead of comparing
        // `&mut` references (the original `value <= &mut 0.` form).
        if *value <= 0.0 {
            *value = 0.0;
        }
    }

    array
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_relu() {
        // Negative entries must clamp to zero; non-negative entries
        // must pass through untouched.
        let mut input: Vec<f32> = vec![1.0, 0.5, -1.0, 0.0, 0.3];
        let mut expected: Vec<f32> = vec![1.0, 0.5, 0.0, 0.0, 0.3];
        assert_eq!(relu(&mut input), &mut expected);
    }
}
34 changes: 34 additions & 0 deletions src/math/sigmoid.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
//Rust implementation of the Sigmoid activation function.
//The formula for Sigmoid: 1 / (1 + e^(-x))
//More information on the concepts of Sigmoid can be found here:
//https://en.wikipedia.org/wiki/Sigmoid_function

//The function below takes a mutable reference to a Vec<f32> as an argument
//and returns that same reference with Sigmoid applied to all values.
//Of course, these functions can be changed by the developer so that the input vector isn't manipulated.
//This is simply an implementation of the formula.

use std::f32::consts::E;

/// Applies the logistic sigmoid activation function in place.
///
/// Every element `x` of `array` is replaced with `1 / (1 + e^(-x))`,
/// which maps each real input into the open interval (0, 1).
///
/// Returns the same mutable reference so calls can be chained.
pub fn sigmoid(array: &mut Vec<f32>) -> &mut Vec<f32> {
    for value in array.iter_mut() {
        // Plain negation replaces `-1. * *value`; IEEE-754 negation is
        // exact, so the computed results are bit-identical.
        *value = 1. / (1. + E.powf(-*value));
    }

    array
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sigmoid() {
        // Spot-check against precomputed f32 sigmoid values; sigmoid(0)
        // must be exactly 0.5.
        let mut input: Vec<f32> = vec![1.0, 0.5, -1.0, 0.0, 0.3];
        let mut expected: Vec<f32> = vec![0.7310586, 0.62245935, 0.26894143, 0.5, 0.5744425];
        assert_eq!(sigmoid(&mut input), &mut expected);
    }
}
34 changes: 34 additions & 0 deletions src/math/tanh.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
//Rust implementation of the Tanh (hyperbolic tangent) activation function.
//The formula for Tanh: (e^x - e^(-x))/(e^x + e^(-x)) OR (2/(1+e^(-2x))-1
//More information on the concepts of Tanh can be found here:
//https://en.wikipedia.org/wiki/Hyperbolic_functions

//The function below takes a mutable reference to a Vec<f32> as an argument
//and returns that same reference with Tanh applied to all values.
//Of course, these functions can be changed by the developer so that the input vector isn't manipulated.
//This is simply an implementation of the formula.

use std::f32::consts::E;

/// Applies the hyperbolic-tangent activation function in place.
///
/// Every element `x` of `array` is replaced with
/// `2 / (1 + e^(-2x)) - 1`, an algebraic rewriting of
/// `(e^x - e^(-x)) / (e^x + e^(-x))` that needs only one
/// exponentiation. Outputs lie in the open interval (-1, 1).
///
/// Returns the same mutable reference so calls can be chained.
pub fn tanh(array: &mut Vec<f32>) -> &mut Vec<f32> {
    for value in array.iter_mut() {
        *value = 2. / (1. + E.powf(-2. * *value)) - 1.;
    }

    array
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tanh() {
        // Spot-check against precomputed f32 tanh values; tanh is odd,
        // so tanh(-1) == -tanh(1), and tanh(0) must be exactly 0.
        let mut input: Vec<f32> = vec![1.0, 0.5, -1.0, 0.0, 0.3];
        let mut expected: Vec<f32> = vec![0.76159406, 0.4621172, -0.7615941, 0.0, 0.29131258];
        assert_eq!(tanh(&mut input), &mut expected);
    }
}