Details
DWT4
import tensorflow as tf
import numpy as np
import pandas as pd
import pydot
import graphviz
from tensorflow import keras
from tensorflow.keras import layers
import tkinter as tk
# Download the 4-level DWT feature set and hold out 20% of the rows for
# validation (fixed seed so the split is reproducible).
file_url = "https://drhack.gr/wp-content/uploads/2023/04/DWT-4levels-ECG01NST-60x89-Last.csv"
dataframe = pd.read_csv(file_url)
dataframe.shape
dataframe.head()
val_dataframe = dataframe.sample(frac=0.2, random_state=1337)
train_dataframe = dataframe.drop(val_dataframe.index)
print(
    f"Using {len(train_dataframe)} samples for training "
    f"and {len(val_dataframe)} for validation"
)
def dataframe_to_dataset(dataframe):
    """Turn a DataFrame with a "target" column into a shuffled tf.data.Dataset
    yielding (features-dict, label) pairs."""
    features = dataframe.copy()
    labels = features.pop("target")
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(buffer_size=len(features))
train_ds = dataframe_to_dataset(train_dataframe)
val_ds = dataframe_to_dataset(val_dataframe)
# Peek at a single example before batching.
for features, label in train_ds.take(1):
    print("Input:", features)
    print("Target:", label)
train_ds = train_ds.batch(32)
val_ds = val_ds.batch(32)
from tensorflow.keras.layers import IntegerLookup
from tensorflow.keras.layers import Normalization
from tensorflow.keras.layers import StringLookup
def encode_numerical_feature(feature, name, dataset):
    """Normalize the numeric input `feature` using the mean/variance of
    column `name` learned from `dataset`."""
    normalizer = Normalization()
    # A dataset that yields only this column, with a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    # Learn the statistics of the training data.
    normalizer.adapt(column_ds)
    return normalizer(feature)
def encode_categorical_feature(feature, name, dataset, is_string):
    """Multi-hot encode the categorical input `feature` using the vocabulary
    of column `name` learned from `dataset`.

    `is_string` selects StringLookup (string-valued column) vs IntegerLookup.
    """
    lookup_cls = StringLookup if is_string else IntegerLookup
    lookup = lookup_cls(output_mode="binary")
    # A dataset that yields only this column, with a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    # Learn the set of possible values and assign each a fixed integer index.
    lookup.adapt(column_ds)
    return lookup(feature)
# ---- Model inputs -------------------------------------------------------
# Categorical feature encoded as an integer.
ECG = keras.Input(shape=(1,), name="ECG", dtype="int64")
# Categorical feature encoded as a string.
GROUP = keras.Input(shape=(1,), name="GROUP", dtype="string")

# Numerical features DWTBPM1 .. DWTBPM86.
# The original code declared each of the 86 inputs by hand; generating them in
# a loop removes ~170 duplicated lines while keeping the exact same
# module-level names (DWTBPM1, DWTBPM2, ...) for the code below.
NUM_DWT_FEATURES = 86
_dwt_names = [f"DWTBPM{i}" for i in range(1, NUM_DWT_FEATURES + 1)]
for _name in _dwt_names:
    globals()[_name] = keras.Input(shape=(1,), name=_name)

# All model inputs, in the same order as the original hand-written list.
all_inputs = [ECG, GROUP] + [globals()[_name] for _name in _dwt_names]
# ---- Feature preprocessing ---------------------------------------------
# Integer categorical feature.
ECG_encoded = encode_categorical_feature(ECG, "ECG", train_ds, False)
# String categorical feature.
GROUP_encoded = encode_categorical_feature(GROUP, "GROUP", train_ds, True)

# Numerical features.
# BUG FIX: many of the original hand-written lines paired the wrong input
# tensor and/or column name, e.g.
#     DWTBPM10_encoded = encode_numerical_feature(DWTBPM1, "DWTBPM1", train_ds)
#     DWTBPM11_encoded = encode_numerical_feature(DWTBPM1, "DWTBPM11", train_ds)
# so DWTBPM10_encoded was actually the DWTBPM1 input normalized on column
# "DWTBPM1", and most of DWTBPM10..86 were wired to the wrong input tensor.
# Generating the encoders in a loop guarantees that every DWTBPM<i> input is
# adapted on and connected to its own column.
for _i in range(1, 87):
    _name = f"DWTBPM{_i}"
    globals()[f"{_name}_encoded"] = encode_numerical_feature(
        globals()[_name], _name, train_ds
    )
# Concatenate every encoded feature into one wide vector, preserving the
# original ordering: ECG, GROUP, then DWTBPM1 .. DWTBPM86.
all_features = layers.concatenate(
    [ECG_encoded, GROUP_encoded]
    + [globals()[f"DWTBPM{i}_encoded"] for i in range(1, 87)]
)
# Classification head: one 32-unit ReLU layer with dropout, then a single
# sigmoid unit for binary classification.
hidden = layers.Dense(32, activation="relu")(all_features)
hidden = layers.Dropout(0.5)(hidden)
output = layers.Dense(1, activation="sigmoid")(hidden)
model = keras.Model(all_inputs, output)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# `rankdir='LR'` draws the connectivity graph horizontally.
keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
model.fit(train_ds, epochs=50, validation_data=val_ds)
# Hand-crafted sample from the terminating ("T") group, used to sanity-check
# the trained model.  The 86 DWT feature values are listed in order and keyed
# DWTBPM1 .. DWTBPM86 below.
_dwt_sample_values = [
    15.049, 15.954, 0.71133, -0.16943, -0.093617, -0.3758, 0.4191, -0.27348,
    0.51794, 0.7778, -0.37705, -0.43298, 0.49203, -0.50637, 10.312, 0.11405,
    0.87534, -0.41432, 0.93978, 0.11845, -0.18828, -0.2243, -0.16092, -0.16067,
    -0.17874, -0.35341, 0.82711, -0.35977, 0.80043, 0.41024, -0.0053516,
    -0.00056808, -0.13293, -0.19591, -0.25608, 0.73422, -0.29908, 10.422,
    0.31114, -0.16143, -0.287, 0.33273, -0.05755, 0.42025, 10.731, -0.4099,
    -0.18027, -0.24305, -0.46275, 0.57248, -0.21145, 0.44987, 0.78692,
    -0.31844, -0.23371, -0.052245, 0.15358, -0.066129, 13.377, -0.28406,
    -0.18193, -0.38386, -0.18971, 0.42986, -0.30491, 12.464, -0.11152,
    -0.18534, -0.38486, 0.23502, -0.11435, 0.31003, 0.93679, -0.43053,
    -0.20893, -0.45197, 0.40383, -0.29302, 0.5754, 0.68984, -0.3486, -0.1586,
    -0.37793, -0.36735, -0.37782, -0.37729,
]
sample = {"ECG": 0, "GROUP": "T"}
sample.update(
    {f"DWTBPM{i}": v for i, v in enumerate(_dwt_sample_values, start=1)}
)
# Wrap each scalar in a one-element list to add a batch dimension, then run a
# single prediction and report it as a percentage.
input_dict = {key: tf.convert_to_tensor([val]) for key, val in sample.items()}
predictions = model.predict(input_dict)
print(
    "This particular patient had a %.1f percent probability "
    "of having a heart disease, as evaluated by our model." % (100 * predictions[0][0],)
)
# Second sanity-check sample: the same feature vector, but labelled as the
# non-terminating ("N") group.
_dwt_sample_values_n = [
    15.049, 15.954, 0.71133, -0.16943, -0.093617, -0.3758, 0.4191, -0.27348,
    0.51794, 0.7778, -0.37705, -0.43298, 0.49203, -0.50637, 10.312, 0.11405,
    0.87534, -0.41432, 0.93978, 0.11845, -0.18828, -0.2243, -0.16092, -0.16067,
    -0.17874, -0.35341, 0.82711, -0.35977, 0.80043, 0.41024, -0.0053516,
    -0.00056808, -0.13293, -0.19591, -0.25608, 0.73422, -0.29908, 10.422,
    0.31114, -0.16143, -0.287, 0.33273, -0.05755, 0.42025, 10.731, -0.4099,
    -0.18027, -0.24305, -0.46275, 0.57248, -0.21145, 0.44987, 0.78692,
    -0.31844, -0.23371, -0.052245, 0.15358, -0.066129, 13.377, -0.28406,
    -0.18193, -0.38386, -0.18971, 0.42986, -0.30491, 12.464, -0.11152,
    -0.18534, -0.38486, 0.23502, -0.11435, 0.31003, 0.93679, -0.43053,
    -0.20893, -0.45197, 0.40383, -0.29302, 0.5754, 0.68984, -0.3486, -0.1586,
    -0.37793, -0.36735, -0.37782, -0.37729,
]
sample = {"ECG": 0, "GROUP": "N"}
sample.update(
    {f"DWTBPM{i}": v for i, v in enumerate(_dwt_sample_values_n, start=1)}
)
# Batch the scalars and run the prediction for the "N"-labelled sample.
input_dict = {key: tf.convert_to_tensor([val]) for key, val in sample.items()}
predictions = model.predict(input_dict)
print(
    "This particular patient had a %.1f percent probability "
    "of having a heart disease, as evaluated by our model." % (100 * predictions[0][0],)
)
#POWERTOP
#!/bin/bash
# Run powertop several times, collect the reported power draw from each HTML
# report, and print the average.
#
# BUG FIXES relative to the original:
#  * Greek variable names (e.g. συνολική_ισχύς) are not valid bash
#    identifiers ([a-zA-Z_][a-zA-Z0-9_]*) and abort the script; renamed to
#    ASCII equivalents.
#  * $x and $power_value were read before they were ever assigned (leftover
#    pre-loop lines, and the accumulation ran before the value was extracted);
#    removed/reordered so extraction happens before use.
#  * Reports are now written into the directory that is actually created.

# Number of measurements
num_measurements=10
# Duration of each measurement in seconds
measurement_duration=1
# Output directory (created if it does not exist)
output_directory="metriseis"
mkdir -p "$output_directory"

# Array holding the power value of each measurement
power_values=()

for ((i=1; i<=num_measurements; i++)); do
    output_file="$output_directory/Measurement_$i.html"
    # Run powertop for the configured duration, saving an HTML report.
    sudo powertop --time=$measurement_duration --html="$output_file"
    echo "Ολοκλήρωση Μέτρησης $i."
    # Extract the power value from the HTML report (the grep pattern must
    # match the report's actual wording — adjust as needed).
    power_value=$(grep "Κατανάλωση Ισχύος" "$output_file" | awk '{print $4}')
    power_values+=("$power_value")
    # Optional pause between measurements (adjust to taste).
    sleep 0
done

# Average power consumption (scale=2 keeps two decimal places in bc).
total_power=0
for power_value in "${power_values[@]}"; do
    total_power=$(echo "$total_power + $power_value" | bc)
done
average_power=$(echo "scale=2; $total_power / $num_measurements" | bc)
echo "Μέση Κατανάλωση Ισχύος: $average_power Watts"
echo "Ολοκλήρωση όλων των μετρήσεων."
#code Hjorth parameters
# -*- coding: utf-8 -*-
#source Desktop/project/env/bin/activate
#python
import tensorflow as tf
import numpy as np
import pandas as pd
import pydot
import graphviz
from tensorflow import keras
from tensorflow.keras import layers
import tkinter as tk
from tensorflow.keras.layers import IntegerLookup
from tensorflow.keras.layers import Normalization
from tensorflow.keras.layers import StringLookup
import Adafruit_CharLCD as LCD
# ---- Load the Hjorth-parameter dataset and split train/validation -------
file_url = "https://drhack.gr/wp-content/uploads/2023/06/Hjorth_P_6x60_Learning_Sets-GROUPED-T_N.csv"
dataframe = pd.read_csv(file_url)
dataframe.shape
dataframe.head()
# 20% of the rows become the validation split; fixed seed for reproducibility.
val_dataframe = dataframe.sample(frac=0.2, random_state=1337)
train_dataframe = dataframe.drop(val_dataframe.index)
# BUG FIX: the "%"-format arguments had been commented out, so the literal
# string "Using %d samples ..." was printed with the placeholders unfilled.
print(
    "Using %d samples for training and %d for validation"
    % (len(train_dataframe), len(val_dataframe))
)
def dataframe_to_dataset(dataframe):
    """Convert a DataFrame with a "target" column into a shuffled
    tf.data.Dataset of (features-dict, label) pairs."""
    features = dataframe.copy()
    labels = features.pop("target")
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(buffer_size=len(features))
train_ds = dataframe_to_dataset(train_dataframe)
val_ds = dataframe_to_dataset(val_dataframe)
# Peek at a single example before batching.
for features, label in train_ds.take(1):
    print("Input:", features)
    print("Target:", label)
train_ds = train_ds.batch(32)
val_ds = val_ds.batch(32)
def encode_numerical_feature(feature, name, dataset):
    """Normalize numeric input `feature` with statistics of column `name`
    learned from `dataset`."""
    normalizer = Normalization()
    # Yield only this column, expanded to have a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    normalizer.adapt(column_ds)
    return normalizer(feature)
def encode_categorical_feature(feature, name, dataset, is_string):
    """Multi-hot encode categorical input `feature` using the vocabulary of
    column `name` learned from `dataset`; `is_string` picks StringLookup vs
    IntegerLookup."""
    lookup_cls = StringLookup if is_string else IntegerLookup
    lookup = lookup_cls(output_mode="binary")
    # Yield only this column, expanded to have a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    lookup.adapt(column_ds)
    return lookup(feature)
# Model inputs: two categorical features plus the three Hjorth parameters.
ECG = keras.Input(shape=(1,), name="ECG", dtype="int64")
GROUP = keras.Input(shape=(1,), name="GROUP", dtype="string")
HjorthBPM1 = keras.Input(shape=(1,), name="HjorthBPM1")
HjorthBPM2 = keras.Input(shape=(1,), name="HjorthBPM2")
HjorthBPM3 = keras.Input(shape=(1,), name="HjorthBPM3")
all_inputs = [ECG, GROUP, HjorthBPM1, HjorthBPM2, HjorthBPM3]
# Encode each input using vocabularies/statistics learned from the training
# set, then concatenate into a single feature vector.
ECG_encoded = encode_categorical_feature(ECG, "ECG", train_ds, False)
GROUP_encoded = encode_categorical_feature(GROUP, "GROUP", train_ds, True)
HjorthBPM1_encoded = encode_numerical_feature(HjorthBPM1, "HjorthBPM1", train_ds)
HjorthBPM2_encoded = encode_numerical_feature(HjorthBPM2, "HjorthBPM2", train_ds)
HjorthBPM3_encoded = encode_numerical_feature(HjorthBPM3, "HjorthBPM3", train_ds)
all_features = layers.concatenate([
    ECG_encoded,
    GROUP_encoded,
    HjorthBPM1_encoded,
    HjorthBPM2_encoded,
    HjorthBPM3_encoded,
])
# Classification head: 32-unit ReLU layer with dropout, sigmoid output.
hidden = layers.Dense(32, activation="relu")(all_features)
hidden = layers.Dropout(0.5)(hidden)
output = layers.Dense(1, activation="sigmoid")(hidden)
model = keras.Model(all_inputs, output)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
"""Let's visualize our connectivity graph:"""
# `rankdir='LR'` draws the connectivity graph horizontally.
keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
"""## Train the model"""
model.fit(train_ds, epochs=250, validation_data=val_ds)
"""We quickly get to 80% validation accuracy.
## Inference on new data
To get a prediction for a new sample, you can simply call `model.predict()`. There are
just two things you need to do:
1. wrap scalars into a list so as to have a batch dimension (models only process batches
of data, not single samples)
2. Call `convert_to_tensor` on each feature
"""
# GPIO pins driving the character LCD (adjust to the actual wiring).
lcd_rs = 25  # register select
lcd_en = 24  # enable
lcd_d4 = 23  # data line 4
lcd_d5 = 17  # data line 5
lcd_d6 = 18  # data line 6
lcd_d7 = 22  # data line 7
# Display geometry: a 16x2 character LCD.
lcd_columns = 16
lcd_rows = 2
# Create the LCD driver instance.
lcd = LCD.Adafruit_CharLCD(
    lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows
)
# BUG FIX: the original assignments carried trailing commas ("X1=0.19599,"),
# which made X1/X2/X3 one-element tuples instead of floats, so the sample
# dict fed tuples into tf.convert_to_tensor downstream.
X1 = 0.19599
X2 = 15.591
X3 = 0.55816
# Sample from the terminating ("T") group for a sanity-check prediction.
sample = {
    "ECG": 0,
    "GROUP": "T",
    "HjorthBPM1": X1,
    "HjorthBPM2": X2,
    "HjorthBPM3": X3,
}
# Batch the scalars and run the prediction for the "T"-labelled sample.
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = model.predict(input_dict)
number = 100 * predictions[0][0]
rounded_number = round(number, 2)
message = "This particular patient had a "
message2 = "% of having a heart disease, as evaluated by our model."
Super_message = message + str(rounded_number) + message2

def display_result():
    """Show the prediction text in the window's label."""
    result_label.config(text=Super_message)

# Simple Tk window with a button that reveals the prediction.
app = tk.Tk()
app.title("Result for Terminating AF ")
result_label = tk.Label(app, text="Terminating AF", font=("Arial", 20))
result_label.pack()
show_result_button = tk.Button(app, text="Show Result", command=display_result)
show_result_button.pack()
# Same feature values, labelled as the non-terminating ("N") group.
sample2 = {
    "ECG": 0,
    "GROUP": "N",
    "HjorthBPM1": X1,
    "HjorthBPM2": X2,
    "HjorthBPM3": X3,
}
input_dict2 = {name: tf.convert_to_tensor([value]) for name, value in sample2.items()}
predictions2 = model.predict(input_dict2)
number2 = 100 * predictions2[0][0]
rounded_number2 = round(number2, 2)
Super_message2 = message + str(rounded_number2) + message2

def display_result2():
    """Show the second prediction text in its window's label."""
    result2_label.config(text=Super_message2)

# Second Tk window for the "N" prediction.
app2 = tk.Tk()
app2.title("Result for non-terminating AF ")
result2_label = tk.Label(app2, text="Non-Terminating AF", font=("Arial", 20))
result2_label.pack()
show_result2_button = tk.Button(app2, text="Show Result2", command=display_result2)
show_result2_button.pack()
message3 = "NON - T.F "
message5 = "T.F "
message4 = "%"
# NOTE(review): rounded_number comes from the GROUP="T" sample yet is shown
# with the "NON - T.F " prefix, while rounded_number2 (GROUP="N") gets
# "T.F " — confirm this pairing is intentional and not swapped.
Super2_message2 = message3 + str(rounded_number) + message4
Super3_message2 = message5 + str(rounded_number2) + message4
# Write one summary per LCD line.
lcd.set_cursor(0, 0)  # first line, first column
lcd.message(Super2_message2)
lcd.set_cursor(0, 1)  # second line, first column
lcd.message(Super3_message2)
# Blocks until the first Tk window is closed.
app.mainloop()
# Clear the display afterwards.
lcd.clear()
Κώδικας Απλός (simple version of the code)
import tensorflow as tf
import numpy as np
import pandas as pd
import pydot
import graphviz
from tensorflow import keras
from tensorflow.keras import layers
import tkinter as tk
# Load the dataset and hold out 20% of the rows for validation (fixed seed
# so the split is reproducible).
file_url = "https://drhack.gr/wp-content/uploads/2023/03/desease-asrxh-3.csv"
dataframe = pd.read_csv(file_url)
dataframe.shape
dataframe.head()
val_dataframe = dataframe.sample(frac=0.2, random_state=1337)
train_dataframe = dataframe.drop(val_dataframe.index)
print(
    f"Using {len(train_dataframe)} samples for training "
    f"and {len(val_dataframe)} for validation"
)
def dataframe_to_dataset(dataframe):
    """Turn a DataFrame with a "desease" label column into a shuffled
    tf.data.Dataset of (features-dict, label) pairs."""
    features = dataframe.copy()
    labels = features.pop("desease")
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(buffer_size=len(features))
train_ds = dataframe_to_dataset(train_dataframe)
val_ds = dataframe_to_dataset(val_dataframe)
# Peek at a single example before batching.
for features, label in train_ds.take(1):
    print("Input:", features)
    print("desease:", label)
train_ds = train_ds.batch(32)
val_ds = val_ds.batch(32)
from tensorflow.keras.layers import IntegerLookup
from tensorflow.keras.layers import Normalization
from tensorflow.keras.layers import StringLookup
def encode_numerical_feature(feature, name, dataset):
    """Normalize the numeric input `feature` using the mean/variance of
    column `name` learned from `dataset`."""
    normalizer = Normalization()
    # A dataset that yields only this column, with a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    # Learn the statistics of the training data.
    normalizer.adapt(column_ds)
    return normalizer(feature)
def encode_categorical_feature(feature, name, dataset, is_string):
    """Multi-hot encode the categorical input `feature` using the vocabulary
    of column `name` learned from `dataset`; `is_string` selects StringLookup
    vs IntegerLookup."""
    lookup_cls = StringLookup if is_string else IntegerLookup
    lookup = lookup_cls(output_mode="binary")
    # A dataset that yields only this column, with a trailing feature axis.
    column_ds = dataset.map(lambda x, y: x[name]).map(
        lambda value: tf.expand_dims(value, -1)
    )
    # Learn the set of possible values.
    lookup.adapt(column_ds)
    return lookup(feature)
# Categorical inputs.
ECG = keras.Input(shape=(1,), name="ECG", dtype="int64")
GROUP = keras.Input(shape=(1,), name="GROUP", dtype="string")
# Nine numerical DWT inputs, generated under their original module-level
# names DWTBPM1 .. DWTBPM9.
for _i in range(1, 10):
    globals()[f"DWTBPM{_i}"] = keras.Input(shape=(1,), name=f"DWTBPM{_i}")
# All inputs in the original order: ECG, GROUP, DWTBPM1 .. DWTBPM9.
all_inputs = [ECG, GROUP] + [globals()[f"DWTBPM{_i}"] for _i in range(1, 10)]
# Integer categorical feature.
ECG_encoded = encode_categorical_feature(ECG, "ECG", train_ds, False)
# String categorical feature.
GROUP_encoded = encode_categorical_feature(GROUP, "GROUP", train_ds, True)
# Numerical features, each normalized against its own column.
for _i in range(1, 10):
    _name = f"DWTBPM{_i}"
    globals()[f"{_name}_encoded"] = encode_numerical_feature(
        globals()[_name], _name, train_ds
    )
# Concatenate all encoded features in the original order.
all_features = layers.concatenate(
    [ECG_encoded, GROUP_encoded]
    + [globals()[f"DWTBPM{i}_encoded"] for i in range(1, 10)]
)
# Classification head: 32-unit ReLU layer with dropout, sigmoid output.
hidden = layers.Dense(32, activation="relu")(all_features)
hidden = layers.Dropout(0.5)(hidden)
output = layers.Dense(1, activation="sigmoid")(hidden)
model = keras.Model(all_inputs, output)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# `rankdir='LR'` draws the connectivity graph horizontally.
keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
model.fit(train_ds, epochs=50, validation_data=val_ds)
# Sanity-check prediction for a terminating ("T") sample.
sample = {
    "ECG": 0,
    "GROUP": "T",
    "DWTBPM1": -5.20770,
    "DWTBPM2": -8.77470,
    "DWTBPM3": 6.07780,
    "DWTBPM4": 6.33166,
    "DWTBPM5": 0.36817,
    "DWTBPM6": 0.21152,
    "DWTBPM7": 0.13017,
    "DWTBPM8": 0.22935,
    "DWTBPM9": -1.12358,
}
# Wrap each scalar in a one-element list to add a batch dimension.
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = model.predict(input_dict)
# BUG FIX: the original print( ... ) was missing its closing parenthesis,
# which is a syntax error that prevents the whole script from running.
print(
    "This particular patient had a %.1f percent probability "
    "of having a heart disease, as evaluated by our model." % (100 * predictions[0][0],)
)
# Second sanity-check prediction.
# NOTE(review): this sample is identical to the previous one, including
# GROUP="T" — the earlier scripts in this document use GROUP="N" for their
# second sample, so confirm whether "N" was intended here.
sample = {
    "ECG": 0,
    "GROUP": "T",
    "DWTBPM1": -5.20770,
    "DWTBPM2": -8.77470,
    "DWTBPM3": 6.07780,
    "DWTBPM4": 6.33166,
    "DWTBPM5": 0.36817,
    "DWTBPM6": 0.21152,
    "DWTBPM7": 0.13017,
    "DWTBPM8": 0.22935,
    "DWTBPM9": -1.12358,
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = model.predict(input_dict)
# BUG FIX: the original print( ... ) was missing its closing parenthesis,
# which is a syntax error.
print(
    "This particular patient had a %.1f percent probability "
    "of having a heart disease, as evaluated by our model." % (100 * predictions[0][0],)
)