Skip to content

fnn #5956

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed

fnn #5956

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 75 additions & 0 deletions examples/sigmoid.ino
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
#include "fnn.h"

FNN fnn(6); // Neural network with 6 inputs

void setup() {
Serial.begin(9600);

// Set bobot, bias, dan fungsi aktivasi
fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6});
fnn.setBiases({0.1, 0.2}); // Menambahkan biases untuk hidden dan output layer
fnn.setActivationFunction(FNN::sigmoid); // Mencoba fungsi aktivasi yang lebih cocok untuk klasifikasi

// Aturan fuzzy (perhatikan bahwa Anda perlu menyesuaikan output dengan label)
fnn.setFuzzyRules({
{"Tidak sesuai", 0.0},
{"Sedikit", 0.2},
{"Sangat Belum", 0.4},
{"Belum Banyak", 0.6},
{"Sedikit Banyak", 0.7},
{"Banyak", 1.0},
{"Extrem", 1.1}

Check failure on line 21 in examples/sigmoid.ino

View workflow job for this annotation

GitHub Actions / spellcheck

Extrem ==> Extremum, Extreme
});

// Data pelatihan
std::vector<std::vector<float>> trainingInputs = {
{4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
{1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
{0.4, 0.3, 0.2, 0.6, 0.5, 0.4},
{5.1, 2.4, 1.2, 4.1, 3.2, 6.5},
{3.3, 1.7, 0.6, 3.4, 2.3, 6.1}
};
std::vector<std::string> trainingTargets = {"Banyak", "Sedikit", "Tidak sesuai", "Sedikit Banyak", "Banyak"};

// Data testing
std::vector<std::vector<float>> testInputs = {
{4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
{1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
{0.4, 0.3, 0.2, 0.6, 0.5, 0.4}
};
std::vector<std::string> testTargets = {"Banyak", "Sedikit", "Tidak sesuai"};

int numEpochs = 1000;
float learningRate = 0.01;

// Melatih model
for (int epoch = 0; epoch < numEpochs; ++epoch) {
fnn.train(trainingInputs, trainingTargets, numEpochs, learningRate); // Latih model

if (epoch % 100 == 0) { // Evaluasi setiap 100 epoch
float accuracy = fnn.evaluateAccuracy(testInputs, testTargets);
float precision = fnn.evaluatePrecision(testInputs, testTargets);
Serial.print("Epoch: ");
Serial.print(epoch);
Serial.print(" | Akurasi: ");
Serial.print(accuracy);
Serial.print("% | Presisi: ");
Serial.print(precision);
Serial.println("%");
}
}

// Prediksi setelah pelatihan
Serial.println("Hasil Prediksi setelah Pelatihan:");
for (size_t i = 0; i < testInputs.size(); ++i) {
String result = fnn.predictFNN(testInputs[i]).c_str(); // Prediksi hasil
Serial.print("Input ke-");
Serial.print(i + 1);
Serial.print(": ");
Serial.println(result);
}
}

void loop() {
// Intentionally empty: the whole demo runs once in setup().
}
9 changes: 9 additions & 0 deletions library.properties
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
name=fnn
version=1.0.0
author=GALIH RIDHO UTOMO
maintainer=GALIH RIDHO UTOMO <g4lihru@students.unnes.ac.id>
sentence=Fuzzy Neural Network for Arduino.
paragraph=The FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy.
category=Signal Input/Output
url=https://github.com/4211421036/fnn.git
architectures=*
1 change: 1 addition & 0 deletions repositories.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7809,3 +7809,4 @@ https://github.com/yunussandikci/ArduinoMessageBus
https://bitbucket.org/mgf_ryan/smartsystem
https://github.com/SequentMicrosystems/Sequent-8crt-Library
https://github.com/dac1e/RtcDueRcf
https://github.com/4211421036/fnn
155 changes: 155 additions & 0 deletions src/fnn.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,155 @@
#include "fnn.h"
#include <Arduino.h>

// Constructor: builds a two-layer network (input->hidden, hidden->output)
// with `inputSize` zero-initialized input weights, one shared starting bias
// per layer, and the given activation (sigmoid when none is supplied).
FNN::FNN(int inputSize, float bias, std::function<float(float)> activation)
    : weights(2, std::vector<float>(inputSize, 0.0)),
      biases(2, bias),
      activationFunction(activation ? activation
                                    : std::function<float(float)>(sigmoid)) {}

// Replace the input-layer weights. A vector whose size does not match the
// input layer is silently ignored (matches the original behavior).
void FNN::setWeights(const std::vector<float>& newWeights) {
    if (newWeights.size() != weights[0].size()) {
        return; // size mismatch: leave weights untouched
    }
    weights[0] = newWeights;
}

// Replace the per-layer biases (index 0 = hidden, index 1 = output).
// A vector of the wrong size is silently ignored (matches original behavior).
void FNN::setBiases(const std::vector<float>& newBiases) {
    if (newBiases.size() != biases.size()) {
        return; // size mismatch: keep current biases
    }
    biases = newBiases;
}

// Set fungsi aktivasi
void FNN::setActivationFunction(std::function<float(float)> activation) {
activationFunction = activation;
}

// Set fuzzy rules
void FNN::setFuzzyRules(const std::map<std::string, float>& rules) {
fuzzyRules = rules;
}

// Sigmoid activation: maps any real input into the open interval (0, 1).
float FNN::sigmoid(float x) {
    const auto denominator = 1.0 + exp(-x);
    return 1.0 / denominator;
}

// Hyperbolic tangent activation: maps input into (-1, 1).
float FNN::tanh(float x) {
    const float result = std::tanh(x);
    return result;
}

// Factory for a leaky-ReLU activation: identity for positive inputs,
// slope `alpha` for negative inputs.
std::function<float(float)> FNN::leakyRelu(float alpha) {
    return [alpha](float x) -> float {
        if (x > 0) {
            return x;
        }
        return alpha * x;
    };
}

// Factory for an ELU activation: identity for positive inputs,
// alpha * (e^x - 1) for negative inputs.
std::function<float(float)> FNN::elu(float alpha) {
    return [alpha](float x) -> float {
        if (x > 0) {
            return x;
        }
        return alpha * (exp(x) - 1);
    };
}

// Softplus activation: log(1 + e^x), a smooth approximation of ReLU.
// Computed in a numerically stable form: the naive log(1 + exp(x))
// overflows to +inf for x greater than roughly 88 (float) even though
// softplus(x) ~= x there, and log(1 + exp(x)) loses precision near 0.
float FNN::softplus(float x) {
    if (x > 0) {
        return x + log1p(exp(-x)); // exp(-x) <= 1: no overflow possible
    }
    return log1p(exp(x));          // exp(x) <= 1: precise via log1p
}

// Map a crisp network output back to a fuzzy label.
//
// BUG FIX: std::map iterates in key (label) order, not threshold order, so
// the original "return the first rule with threshold >= output" scan picked
// an essentially arbitrary label (e.g. "Banyak" = 1.0 sorts before
// "Belum Banyak" = 0.6 and shadowed it for all outputs <= 1.0). Select the
// rule with the SMALLEST threshold that still covers the output, which is
// what an ascending threshold table intends. Returns "Undefined" when the
// output exceeds every threshold or no rules are set.
std::string FNN::defuzzify(float fuzzyOutput) {
    std::string bestLabel = "Undefined";
    float bestThreshold = 0.0f;
    bool found = false;
    for (const auto& rule : fuzzyRules) {
        const bool covers = (fuzzyOutput <= rule.second);
        if (covers && (!found || rule.second < bestThreshold)) {
            bestLabel = rule.first;
            bestThreshold = rule.second;
            found = true;
        }
    }
    return bestLabel;
}

// Mean squared error between predicted and expected vectors.
// Uses diff * diff instead of pow(diff, 2): pow is a general transcendental
// routine and far slower for a fixed integer exponent.
// Also guards the empty case, which previously divided by zero.
float FNN::computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected) {
    if (predicted.empty()) {
        return 0.0f; // empty input: no error to report, avoid 0/0
    }
    float loss = 0.0f;
    for (size_t i = 0; i < predicted.size(); ++i) {
        const float diff = predicted[i] - expected[i];
        loss += diff * diff;
    }
    return loss / predicted.size();
}

// Train the network with simple online (per-sample) updates.
//
// inputs:       training feature vectors; each must match the input size
// targets:      fuzzy label per sample, converted to a crisp target value
//               through fuzzyRules
// epochs:       number of full passes over the training set
// learningRate: step size applied to every weight/bias update
//
// NOTE(review): the update rule omits the activation-function derivative,
// so this is a simplified delta-rule rather than exact backpropagation —
// confirm the simplification is intentional.
// NOTE(review): hiddenError is computed from weights[1][0] AFTER that weight
// has been updated; textbook backprop uses the pre-update weight.
// NOTE(review): fuzzyRules[targets[i]] uses operator[], which silently
// inserts a 0.0 rule for any unknown label — verify all targets exist.
void FNN::train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs, float learningRate) {
    for (int epoch = 0; epoch < epochs; ++epoch) {
        for (size_t i = 0; i < inputs.size(); ++i) {
            // Forward pass: single hidden activation from the weighted inputs.
            float hiddenSum = biases[0];
            for (size_t j = 0; j < weights[0].size(); ++j) {
                hiddenSum += inputs[i][j] * weights[0][j];
            }
            float hiddenOutput = activationFunction(hiddenSum);

            // Output unit driven by the hidden activation.
            float outputSum = hiddenOutput * weights[1][0] + biases[1];
            float output = activationFunction(outputSum);

            // Error against the crisp value of the fuzzy target label,
            // applied to the output weight and bias.
            float outputError = fuzzyRules[targets[i]] - output;
            weights[1][0] += learningRate * outputError * hiddenOutput;
            biases[1] += learningRate * outputError;

            // Propagate the error back to the input-layer weights.
            float hiddenError = outputError * weights[1][0];
            for (size_t j = 0; j < weights[0].size(); ++j) {
                weights[0][j] += learningRate * hiddenError * inputs[i][j];
            }
            biases[0] += learningRate * hiddenError;
        }
    }
}

// Run one forward pass over `inputs` and return the defuzzified label.
std::string FNN::predictFNN(const std::vector<float>& inputs) {
    // Hidden layer: bias plus weighted sum of the inputs, then activation.
    float weightedSum = biases[0];
    const size_t inputCount = weights[0].size();
    for (size_t idx = 0; idx < inputCount; ++idx) {
        weightedSum += inputs[idx] * weights[0][idx];
    }
    const float hidden = activationFunction(weightedSum);

    // Output layer: single weight from the hidden unit plus the output bias.
    const float crispOutput = activationFunction(hidden * weights[1][0] + biases[1]);
    return defuzzify(crispOutput);
}
// Accuracy over a test set: fraction of samples whose predicted label
// matches the expected label, returned in percent.
// Guards the empty test set, which previously divided by zero.
float FNN::evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
    if (testInputs.empty()) {
        return 0.0f; // no samples: report 0% instead of dividing by zero
    }

    int correctPredictions = 0;
    for (size_t i = 0; i < testInputs.size(); ++i) {
        if (predictFNN(testInputs[i]) == expectedOutputs[i]) {
            ++correctPredictions;
        }
    }

    return 100.0f * static_cast<float>(correctPredictions) / testInputs.size();
}

// Evaluasi Presisi
float FNN::evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
int truePositives = 0;
int falsePositives = 0;

for (size_t i = 0; i < testInputs.size(); ++i) {
std::string predictedOutput = predictFNN(testInputs[i]);

if (predictedOutput == expectedOutputs[i]) {
truePositives++;
} else if (fuzzyRules.find(predictedOutput) != fuzzyRules.end()) {
falsePositives++;
}
}

if (truePositives + falsePositives == 0) {
return 0.0f; // Hindari pembagian dengan nol
}

float precision = (float)truePositives / (truePositives + falsePositives);
return precision * 100.0f; // Hasil dalam persen
}
58 changes: 58 additions & 0 deletions src/fnn.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#ifndef FNN_H
#define FNN_H

#include <cmath>
#include <vector>
#include <functional>
#include <map>
#include <string>

// Fuzzy Neural Network: a minimal feed-forward network (one hidden unit)
// whose crisp numeric output is mapped back to a text label via fuzzy rules.
class FNN {
private:
    std::vector<std::vector<float>> weights; // per-layer weights: [0] input->hidden, [1] hidden->output
    std::vector<float> biases; // per-layer biases: [0] hidden layer, [1] output layer
    std::function<float(float)> activationFunction; // activation applied at both layers
    std::map<std::string, float> fuzzyRules; // label -> crisp threshold value

    // Mean squared error between predicted and expected vectors.
    float computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected);

    // Map a crisp output value back to a fuzzy label ("Undefined" if no rule matches).
    std::string defuzzify(float fuzzyOutput);

public:
    // inputSize: number of input features; bias: initial bias for both layers;
    // activation: activation function (sigmoid is used when nullptr).
    FNN(int inputSize = 3, float bias = 0.1, std::function<float(float)> activation = nullptr);

    // Replace the input-layer weights (ignored if the size does not match inputSize).
    void setWeights(const std::vector<float>& newWeights);

    // Replace the per-layer biases (ignored if the size does not match).
    void setBiases(const std::vector<float>& newBiases);

    // Replace the activation function used at both layers.
    void setActivationFunction(std::function<float(float)> activation);

    // Replace the fuzzy rule table (label -> crisp value).
    void setFuzzyRules(const std::map<std::string, float>& rules);

    // Forward pass over one input vector; returns the defuzzified label.
    std::string predictFNN(const std::vector<float>& inputs);

    // Train on the given samples for `epochs` passes with step size `learningRate`.
    void train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs = 100, float learningRate = 0.01);

    // Evaluation helpers (results in percent).
    // Accuracy: fraction of test samples predicted correctly.
    float evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);

    // Precision: TP / (TP + FP) over the test set.
    float evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);

    // Built-in activation functions and activation factories.
    static float sigmoid(float x);
    static float tanh(float x);
    static std::function<float(float)> leakyRelu(float alpha = 0.01);
    static std::function<float(float)> elu(float alpha = 1.0);
    static float softplus(float x);
};

#endif
Loading