diff --git a/api/run.py b/api/run.py
index eaad0e64f787d8ece20b7ebb3da49908fb1899bc..fc46fa1cb05aa3f63d5b4a671fa144d29a06e5ea 100644
--- a/api/run.py
+++ b/api/run.py
@@ -60,10 +60,10 @@ batch_size = 5
 epochs = 10
 accuracy=0
 execute_time=0
-layer_1_units=100
-layer_2_units=50
-layer_3_units=50
-dense_units=1
+input_units=100
+hidden_layer_1=50
+hidden_layer_2=25
+output_units=1
 
 # Scaler
 scaler = MinMaxScaler()
@@ -134,14 +134,11 @@ def input_and_targert(data,feature_length):
         x_samples.append(x_sample)
         y_samples.append(y_sample)
 
-    # Reshape the input as a 3D (Number of samles,length of features,features)
-
-    #Reshape input
+    # Reshape the input as a 3D (Number of Samples,time steps,features)
     X = np.array(x_samples)
     X=X.reshape(X.shape[0],X.shape[1],1)
     print("\n____Input Data Shape :____")
     print(X.shape)
-
     # Reshape Target
     Y=np.array(y_samples)
     Y=Y.reshape(Y.shape[0],1)
@@ -257,23 +254,24 @@ def setup():
 
 
     #Add First LSTM Layer
-
-    model.add(LSTM(units = layer_1_units, activation = 'relu', input_shape = (time_steps, features), return_sequences=True))
+    model.add(LSTM(units = input_units, activation = 'relu', input_shape = (time_steps, features), return_sequences=True))
 
     # Adding the Second hidden layer and the LSTM layer
-
-    model.add(LSTM(units = layer_2_units, activation = 'relu', input_shape = (time_steps, features), return_sequences=True))
+    model.add(LSTM(units = hidden_layer_1, activation = 'relu', input_shape = (time_steps, features), return_sequences=True))
 
     # Adding the Third hidden layer and the LSTM layer
-    model.add(LSTM(units = layer_3_units, activation = 'relu', return_sequences=False ))
-
+    model.add(LSTM(units = hidden_layer_2, activation = 'relu', return_sequences=False ))
     # Adding the output layer
-    model.add(Dense(units = dense_units))
-
+    model.add(Dense(units = output_units))
     # Compiling model
 
     model.compile(optimizer = 'adam', loss = 'mean_squared_error')
 
+    print(model.input)
+    print(model.output)
+
+    print(model.summary())
+
 
     # Measuring the time taken by the model to train
     start_time=time.time()
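
For orientation, the following is a minimal, self-contained sketch of the stacked-LSTM network that the third hunk assembles after the rename. It assumes the TensorFlow Keras API (the import path is not visible in this diff) and placeholder values for time_steps and features, which in api/run.py come from the data-preparation step; only the first layer's input_shape is used by a Sequential model, so the repeated argument on the second LSTM layer is dropped here.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

# Renamed hyperparameters from the first hunk
input_units = 100      # units in the first LSTM layer
hidden_layer_1 = 50    # units in the second LSTM layer
hidden_layer_2 = 25    # units in the third LSTM layer
output_units = 1       # single regression output

# Assumed placeholder shapes: one feature per time step, matching the
# (samples, time steps, 1) reshape done in input_and_targert()
time_steps = 10
features = 1

model = Sequential()
# First LSTM layer defines the input shape and passes sequences onward
model.add(LSTM(units=input_units, activation='relu',
               input_shape=(time_steps, features), return_sequences=True))
# Second LSTM layer keeps returning sequences for the next LSTM
model.add(LSTM(units=hidden_layer_1, activation='relu', return_sequences=True))
# Third LSTM layer collapses the sequence into a single vector
model.add(LSTM(units=hidden_layer_2, activation='relu', return_sequences=False))
# Dense output layer for the single regression target
model.add(Dense(units=output_units))

model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()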