diff --git a/examples/sigmoid.ino b/examples/sigmoid.ino
new file mode 100644
index 000000000..50003e3ee
--- /dev/null
+++ b/examples/sigmoid.ino
@@ -0,0 +1,75 @@
+#include "fnn.h"
+
+FNN fnn(6); // Neural network with 6 inputs
+
+void setup() {
+    Serial.begin(9600);
+
+    // Set the weights, biases, and activation function
+    fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6}); // Input-to-hidden weights; the hidden-to-output weight is learned during training
+    fnn.setBiases({0.1, 0.2}); // Biases for the hidden and output layers
+    fnn.setActivationFunction(FNN::sigmoid); // Sigmoid suits this bounded, classification-style output
+
+    // Fuzzy rules (note that the rule labels must match the training and test target labels)
+    fnn.setFuzzyRules({
+        {"Tidak sesuai", 0.0},
+        {"Sedikit", 0.2},
+        {"Sangat Belum", 0.4},
+        {"Belum Banyak", 0.6},
+        {"Sedikit Banyak", 0.7},
+        {"Banyak", 1.0},
+        {"Extrem", 1.1}
+    });
+
+    // Training data
+    std::vector<std::vector<float>> trainingInputs = {
+        {4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
+        {1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
+        {0.4, 0.3, 0.2, 0.6, 0.5, 0.4},
+        {5.1, 2.4, 1.2, 4.1, 3.2, 6.5},
+        {3.3, 1.7, 0.6, 3.4, 2.3, 6.1}
+    };
+    std::vector<std::string> trainingTargets = {"Banyak", "Sedikit", "Tidak sesuai", "Sedikit Banyak", "Banyak"};
+
+    // Test data
+    std::vector<std::vector<float>> testInputs = {
+        {4.5, 2.8, 0.9, 3.7, 3.1, 7.9},
+        {1.2, 0.6, 0.3, 0.5, 0.2, 0.7},
+        {0.4, 0.3, 0.2, 0.6, 0.5, 0.4}
+    };
+    std::vector<std::string> testTargets = {"Banyak", "Sedikit", "Tidak sesuai"};
+
+    int numEpochs = 1000;
+    float learningRate = 0.01;
+
+    // Train the model
+    for (int epoch = 0; epoch < numEpochs; ++epoch) {
+        fnn.train(trainingInputs, trainingTargets, 1, learningRate); // One pass over the training data per outer iteration
+
+        if (epoch % 100 == 0) { // Evaluate every 100 epochs
+            float accuracy = fnn.evaluateAccuracy(testInputs, testTargets);
+            float precision = fnn.evaluatePrecision(testInputs, testTargets);
+            Serial.print("Epoch: ");
+            Serial.print(epoch);
+            Serial.print(" | Accuracy: ");
+            Serial.print(accuracy);
+            Serial.print("% | Precision: ");
+            Serial.print(precision);
+            Serial.println("%");
+        }
+    }
+
+    // Predictions after training
+    Serial.println("Prediction results after training:");
+    for (size_t i = 0; i < testInputs.size(); ++i) {
+        String result = fnn.predictFNN(testInputs[i]).c_str(); // Convert std::string to an Arduino String
+        Serial.print("Input #");
+        Serial.print(i + 1);
+        Serial.print(": ");
+        Serial.println(result);
+    }
+}
+
+void loop() {
+    // Nothing to do in loop() for this example
+}
diff --git a/library.properties b/library.properties
new file mode 100644
index 000000000..55d502599
--- /dev/null
+++ b/library.properties
@@ -0,0 +1,9 @@
+name=fnn
+version=1.0.0
+author=GALIH RIDHO UTOMO
+maintainer=GALIH RIDHO UTOMO
+sentence=Fuzzy Neural Network for Arduino.
+paragraph=The FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy.
+category=Signal Input/Output
+url=https://github.com/4211421036/fnn.git
+architectures=*
diff --git a/repositories.txt b/repositories.txt
index e41d390f1..790a0c857 100644
--- a/repositories.txt
+++ b/repositories.txt
@@ -7809,3 +7809,4 @@ https://github.com/yunussandikci/ArduinoMessageBus
 https://bitbucket.org/mgf_ryan/smartsystem
 https://github.com/SequentMicrosystems/Sequent-8crt-Library
 https://github.com/dac1e/RtcDueRcf
+https://github.com/4211421036/fnn
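Note on the paragraph in library.properties: the "hybrid" computation that src/fnn.cpp implements below is one hidden neuron feeding one output neuron, with the crisp sigmoid output then mapped to a label through the fuzzy-rule thresholds. The snippet below is only a minimal host-side sketch of that forward pass, not part of this diff: the input row, input-to-hidden weights, and biases are copied from examples/sigmoid.ino, while the hidden-to-output weight w1 is a made-up placeholder (the library starts it at 0 and learns it during training).

    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> x = {4.5f, 2.8f, 0.9f, 3.7f, 3.1f, 7.9f}; // one input row from the example sketch
        std::vector<float> w = {0.3f, 0.5f, 0.2f, 0.4f, 0.1f, 0.6f}; // input-to-hidden weights from the example sketch
        float b0 = 0.1f, b1 = 0.2f;                                  // hidden and output biases from the example sketch
        float w1 = 0.5f;                                             // hidden-to-output weight: placeholder value

        float hiddenSum = b0;                                        // weighted sum into the single hidden neuron
        for (std::size_t j = 0; j < x.size(); ++j) {
            hiddenSum += x[j] * w[j];
        }
        float hidden = 1.0f / (1.0f + std::exp(-hiddenSum));         // sigmoid activation

        float out = 1.0f / (1.0f + std::exp(-(hidden * w1 + b1)));   // output neuron, sigmoid again
        std::printf("crisp output = %f\n", out);                     // defuzzify() maps this value to a label
        return 0;
    }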
diff --git a/src/fnn.cpp b/src/fnn.cpp
new file mode 100644
index 000000000..3292c2944
--- /dev/null
+++ b/src/fnn.cpp
@@ -0,0 +1,162 @@
+#include "fnn.h"
+#include <cmath>
+
+// Constructor
+FNN::FNN(int inputSize, float bias, std::function<float(float)> activation)
+    : weights(2, std::vector<float>(inputSize, 0.0)), biases(2, bias), activationFunction(activation) {
+    if (!activationFunction) {
+        activationFunction = sigmoid; // Default activation function is the sigmoid
+    }
+}
+
+// Set weights
+void FNN::setWeights(const std::vector<float>& newWeights) {
+    if (newWeights.size() == weights[0].size()) {
+        weights[0] = newWeights;
+    }
+}
+
+// Set biases
+void FNN::setBiases(const std::vector<float>& newBiases) {
+    if (newBiases.size() == biases.size()) {
+        biases = newBiases;
+    }
+}
+
+// Set activation function
+void FNN::setActivationFunction(std::function<float(float)> activation) {
+    activationFunction = activation;
+}
+
+// Set fuzzy rules
+void FNN::setFuzzyRules(const std::map<std::string, float>& rules) {
+    fuzzyRules = rules;
+}
+
+// Activation function: sigmoid
+float FNN::sigmoid(float x) {
+    return 1.0 / (1.0 + exp(-x));
+}
+
+// Activation function: tanh
+float FNN::tanh(float x) {
+    return std::tanh(x);
+}
+
+// Activation function: leaky ReLU
+std::function<float(float)> FNN::leakyRelu(float alpha) {
+    return [alpha](float x) { return (x > 0) ? x : alpha * x; };
+}
+
+// Activation function: ELU
+std::function<float(float)> FNN::elu(float alpha) {
+    return [alpha](float x) { return (x > 0) ? x : alpha * (exp(x) - 1); };
+}
+
+// Activation function: softplus
+float FNN::softplus(float x) {
+    return log(1 + exp(x));
+}
+
+// Defuzzification: return the label of the rule with the smallest threshold
+// that is still greater than or equal to the crisp output
+std::string FNN::defuzzify(float fuzzyOutput) {
+    std::string bestLabel = "Undefined";
+    float bestThreshold = 0.0f;
+    bool found = false;
+    for (const auto& rule : fuzzyRules) {
+        if (fuzzyOutput <= rule.second && (!found || rule.second < bestThreshold)) {
+            bestLabel = rule.first;
+            bestThreshold = rule.second;
+            found = true;
+        }
+    }
+    return bestLabel;
+}
+
+// Compute loss (mean squared error)
+float FNN::computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected) {
+    float loss = 0.0f;
+    for (size_t i = 0; i < predicted.size(); ++i) {
+        loss += pow(predicted[i] - expected[i], 2);
+    }
+    return loss / predicted.size();
+}
+
+// Train with a simple delta rule; the crisp target is looked up from the fuzzy rules
+void FNN::train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs, float learningRate) {
+    for (int epoch = 0; epoch < epochs; ++epoch) {
+        for (size_t i = 0; i < inputs.size(); ++i) {
+            float hiddenSum = biases[0];
+            for (size_t j = 0; j < weights[0].size(); ++j) {
+                hiddenSum += inputs[i][j] * weights[0][j];
+            }
+            float hiddenOutput = activationFunction(hiddenSum);
+
+            float outputSum = hiddenOutput * weights[1][0] + biases[1];
+            float output = activationFunction(outputSum);
+
+            float outputError = fuzzyRules[targets[i]] - output;
+            weights[1][0] += learningRate * outputError * hiddenOutput;
+            biases[1] += learningRate * outputError;
+
+            float hiddenError = outputError * weights[1][0];
+            for (size_t j = 0; j < weights[0].size(); ++j) {
+                weights[0][j] += learningRate * hiddenError * inputs[i][j];
+            }
+            biases[0] += learningRate * hiddenError;
+        }
+    }
+}
+
+// Predict
+std::string FNN::predictFNN(const std::vector<float>& inputs) {
+    float hiddenSum = biases[0];
+    for (size_t j = 0; j < weights[0].size(); ++j) {
+        hiddenSum += inputs[j] * weights[0][j];
+    }
+    float hiddenOutput = activationFunction(hiddenSum);
+
+    float outputSum = hiddenOutput * weights[1][0] + biases[1];
+    float output = activationFunction(outputSum);
+
+    return defuzzify(output);
+}
+
+// Evaluate accuracy
+float FNN::evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
+    int correctPredictions = 0;
+
+    for (size_t i = 0; i < testInputs.size(); ++i) {
+        std::string predictedOutput = predictFNN(testInputs[i]);
+        if (predictedOutput == expectedOutputs[i]) {
+            correctPredictions++;
+        }
+    }
+
+    float accuracy = (float)correctPredictions / testInputs.size();
+    return accuracy * 100.0f; // Result as a percentage
+}
+
+// Evaluate precision
+float FNN::evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) {
+    int truePositives = 0;
+    int falsePositives = 0;
+
+    for (size_t i = 0; i < testInputs.size(); ++i) {
+        std::string predictedOutput = predictFNN(testInputs[i]);
+
+        if (predictedOutput == expectedOutputs[i]) {
+            truePositives++;
+        } else if (fuzzyRules.find(predictedOutput) != fuzzyRules.end()) {
+            falsePositives++;
+        }
+    }
+
+    if (truePositives + falsePositives == 0) {
+        return 0.0f; // Avoid division by zero
+    }
+
+    float precision = (float)truePositives / (truePositives + falsePositives);
+    return precision * 100.0f; // Result as a percentage
+}
diff --git a/src/fnn.h b/src/fnn.h
new file mode 100644
index 000000000..262750e3a
--- /dev/null
+++ b/src/fnn.h
@@ -0,0 +1,58 @@
+#ifndef FNN_H
+#define FNN_H
+
+#include <Arduino.h>
+#include <vector>
+#include <map>
+#include <string>
+#include <functional>
+
+class FNN {
+private:
+    std::vector<std::vector<float>> weights;        // Weights for each layer
+    std::vector<float> biases;                      // Bias for each layer
+    std::function<float(float)> activationFunction; // Activation function
+    std::map<std::string, float> fuzzyRules;        // Fuzzy rules
+
+    float computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected);
+
+    // Defuzzification
+    std::string defuzzify(float fuzzyOutput);
+
+public:
+    FNN(int inputSize = 3, float bias = 0.1, std::function<float(float)> activation = nullptr);
+
+    // Set weights
+    void setWeights(const std::vector<float>& newWeights);
+
+    // Set biases
+    void setBiases(const std::vector<float>& newBiases);
+
+    // Set activation function
+    void setActivationFunction(std::function<float(float)> activation);
+
+    // Set fuzzy rules
+    void setFuzzyRules(const std::map<std::string, float>& rules);
+
+    // FNN prediction
+    std::string predictFNN(const std::vector<float>& inputs);
+
+    // Training
+    void train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs = 100, float learningRate = 0.01);
+
+    // Evaluation metrics
+    // Evaluate accuracy
+    float evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);
+
+    // Evaluate precision
+    float evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs);
+
+    // Built-in activation functions
+    static float sigmoid(float x);
+    static float tanh(float x);
+    static std::function<float(float)> leakyRelu(float alpha = 0.01);
+    static std::function<float(float)> elu(float alpha = 1.0);
+    static float softplus(float x);
+};
+
+#endif
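The defuzzification step in src/fnn.cpp returns the rule with the smallest threshold that is still greater than or equal to the crisp network output. The following host-side snippet is only a minimal illustration of that lookup, not part of this diff; it reuses the thresholds from examples/sigmoid.ino, and the crisp output value 0.65 is a made-up placeholder:

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, float> rules = {
            {"Tidak sesuai", 0.0f}, {"Sedikit", 0.2f}, {"Sangat Belum", 0.4f},
            {"Belum Banyak", 0.6f}, {"Sedikit Banyak", 0.7f}, {"Banyak", 1.0f}, {"Extrem", 1.1f}
        };
        float output = 0.65f;            // hypothetical crisp network output
        std::string label = "Undefined";
        float best = 0.0f;
        bool found = false;
        for (const auto& r : rules) {    // pick the smallest threshold >= output
            if (output <= r.second && (!found || r.second < best)) {
                label = r.first;
                best = r.second;
                found = true;
            }
        }
        std::printf("%f -> %s\n", output, label.c_str()); // prints: 0.650000 -> Sedikit Banyak
        return 0;
    }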