diff --git a/API/run.py b/API/run.py
index 387e9fe055a437973b3c66838e460896f449a49f..8673dc0439bb95204e47a61188dd2b503e9b6170 100644
--- a/API/run.py
+++ b/API/run.py
@@ -41,7 +41,7 @@ import matplotlib.pyplot as plt
 plt.rcParams.update({'figure.figsize':(9,7), 'figure.dpi':120})
 import pmdarima as pm
 
-#globally declare dataset
+# globally declare dataset
 data_set =pd.DataFrame()
 
 # Tranning dataset with
@@ -56,42 +56,22 @@ validate_data_set=pd.DataFrame()
 result_data_set=pd.DataFrame()
 
+# LSTM model, declared globally so the routes share one instance
+model = Sequential()
 
-def model_define():
-    # use predefined tranning data set
-    global tranning_data_set
-
-    # initialize the scaling method
-    scaler = MinMaxScaler()
-
-    #filter items an array
-
-    items = tranning_data_set.filter(['Items'])
-    items=items.values
-
-    items_length = math.ceil(len(items)*0.8)
-    # Normalize the data set
-    scaled_data=scaler.fit_transform(items)
-    # Create the training dataset
-    train_data = scaled_data[0 : items_length, :]
+def model_define(X_train):
+    # build the layers of the globally declared LSTM model
+    global model
 
-    X_train = []
-    y_train = []
-    for i in range(0, len(train_data)):
-        X_train.append(train_data[i - items_length : i, 0])
-        y_train.append(train_data[i, 0])
+    model.add(LSTM(units=512, return_sequences=True, activation='relu', input_shape=(X_train.shape[1], 1)))
 
-    # make X_train and y_train np array
-    X_train, y_train = np.array(X_train), np.array(y_train)
+    model.add(LSTM(units=256, activation='relu', return_sequences=False))
 
-    # reshape the data
-    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
-    print(X_train)
+    model.add(Dense(units=1))
 
-
-
-
+    # compile the LSTM model
+    model.compile(optimizer="Adam", loss="mean_squared_error", metrics=['mae'])
     return "model_define"
@@ -147,8 +127,9 @@ def start():
 def tranning():
     global tranning_data_set
-
+    global model
+
     # read incomming json data
     data=request.get_json()
 
@@ -158,9 +139,47 @@ def tranning():
     tranning_data_set =pd.read_json(data_arr)
     # tranning_data_set=tranning_data_set.T
-    print("Tranning Start")
-    # model_define()
+    items=tranning_data_set.filter(['Items'])
+    items_array=items.values
+    items_len=len(items_array)
+
+
+
+
+    scaler = MinMaxScaler()
+    scaled_data = scaler.fit_transform(items_array)
+
+    items_len_half = math.ceil(len(items_array) * 0.5)
+
+    train_data = scaled_data[0 : items_len, :]
+    # Create X_train and y_train
+
+    X_train = []
+    y_train = []
+    for i in range(items_len_half, len(train_data)):
+        X_train.append(train_data[i - items_len_half : i, 0])
+        y_train.append(train_data[i, 0])
+
+
+    # make X_train and y_train NumPy arrays
+    X_train, y_train = np.array(X_train), np.array(y_train)
+
+    # reshape the data to (samples, timesteps, features)
+    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
+
+
+    print(X_train)
+
+    model_define(X_train)
+
+    # train the LSTM model
+    model.fit(X_train, y_train,
+              epochs=3,
+              batch_size=100,
+              verbose=1)
+
+    model.summary()
 
     return tranning_data_set.to_json()
 
@@ -168,6 +187,10 @@ def tranning():
 @app.route('/prediction',methods=['POST'])
 def prediction():
+
+
+
+
     return "prediction"
 
 @app.route('/optimize',methods=['POST'])
@@ -175,10 +198,24 @@ def prediction():
 def optimize():
     return "optimize"
 
-@app.route('/validate')
+@app.route('/validate',methods=['POST'])
 def validate():
-    return "validate"
+
+    global validate_data_set
+    global model
+
+
+    # read incoming JSON data
+    data=request.get_json()
+
+    # convert JSON data into a pandas data structure
+    data_arr=json.dumps(data)
+    validate_data_set =pd.read_json(data_arr)
+    # tranning_data_set=tranning_data_set.T
+
+
+    return validate_data_set.to_json()
 
 if __name__ == "__main__":
     app.run()
\ No newline at end of file
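Two notes on the run.py changes above. First, because `model` is now a module-level `Sequential()` and `model_define()` appends layers on every call, a second POST to the training route would stack additional layers onto the already-trained model. Second, the sliding-window loop in the training route turns the scaled `Items` series into supervised (X, y) pairs, using half the series length as the look-back window. A minimal standalone sketch of that windowing (the names `series` and `window` are illustrative, not from this PR):

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler

    # toy stand-in for the `Items` column
    series = np.arange(1.0, 11.0).reshape(-1, 1)
    scaled = MinMaxScaler().fit_transform(series)

    window = 5  # the route uses math.ceil(len(items_array) * 0.5)
    X, y = [], []
    for i in range(window, len(scaled)):
        X.append(scaled[i - window : i, 0])  # previous `window` scaled values
        y.append(scaled[i, 0])               # next value to predict
    X = np.array(X).reshape(-1, window, 1)   # (samples, timesteps, features)
    y = np.array(y)
    print(X.shape, y.shape)                  # -> (5, 5, 1) (5,)
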
diff --git a/items.ipynb b/items.ipynb
index 5fb1b36057a45fd6096c71ffa676742943ae1cb2..aab2ba0a6c256b3f4e903f9e551d76e1f52019b3 100644
--- a/items.ipynb
+++ b/items.ipynb
@@ -364,7 +364,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.9"
+   "version": "3.10.9 (tags/v3.10.9:1dd9be6, Dec 6 2022, 20:01:21) [MSC v.1934 64 bit (AMD64)]"
   },
   "orig_nbformat": 4,
   "vscode": {
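For reference, one way to smoke-test the updated endpoints locally. This is a sketch, assuming the app runs on Flask's default http://127.0.0.1:5000 and that the training handler is registered under a route matching its name (its decorator is outside this diff); the payload rows are hypothetical sample data shaped to match the `Items` column the handlers filter on:

    import requests

    BASE = "http://127.0.0.1:5000"
    payload = [{"Items": 120}, {"Items": 135}, {"Items": 128}, {"Items": 142}]

    # train the LSTM on the posted series
    resp = requests.post(f"{BASE}/tranning", json=payload)
    print(resp.json())

    # /validate now accepts POST and echoes the parsed data back
    resp = requests.post(f"{BASE}/validate", json=payload)
    print(resp.json())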