Daniel Varga committed
Commit e9cac80
1 Parent(s): fc67e21
playing around with prophet
- demo_prophet.py +58 -0
demo_prophet.py
ADDED
@@ -0,0 +1,58 @@
+import pandas as pd
+import matplotlib.pyplot as plt
+from prophet import Prophet
+
+# df = pd.read_csv('https://raw.githubusercontent.com/facebook/prophet/main/examples/example_wp_log_peyton_manning.csv')
+
+cons_filename = 'pq_terheles_2021_adatok.tsv'
+
+df = pd.read_csv(cons_filename, sep='\t', skipinitialspace=True, na_values='n/a', decimal=',')
+df['Time'] = pd.to_datetime(df['Korrigált időpont'], format='%m/%d/%y %H:%M')
+df = df.set_index('Time')
+df['Consumption'] = df['Hatásos teljesítmény [kW]']
+
+df['ds'] = df.index
+df['y'] = df['Consumption']
+
+
+split_date = '2021-07-01'
+
+
+# TODO 15 minutes hardwired!
+forecast_horizon = 7 * 24 * 4
+
+# Split the data into training (past) and evaluation (future) sets
+train_data = df[df['ds'] <= split_date]
+eval_data = df[df['ds'] > split_date]
+
+
+# Initialize and train the Prophet model using the training data
+model = Prophet(seasonality_mode='multiplicative', growth='flat',
+                yearly_seasonality=True, weekly_seasonality=True, daily_seasonality=True)
+
+model.fit(train_data)
+
+# Create a DataFrame with future timestamps for the evaluation period
+future = model.make_future_dataframe(periods=forecast_horizon, freq='15T', include_history=False)
+
+# Make predictions for the evaluation period
+forecast = model.predict(future)
+
+# Calculate evaluation metrics (e.g., MAE, MSE, RMSE) by comparing forecast['yhat'] with eval_data['y']
+
+# For example, you can calculate MAE as follows:
+from sklearn.metrics import mean_absolute_error
+
+eval_data = eval_data[eval_data['ds'] <= forecast['ds'].max()]
+
+mae = mean_absolute_error(eval_data['y'], forecast['yhat'])
+
+# Print or store the evaluation metrics
+print(f"Mean Absolute Error (MAE): {mae}")
+
+
+fig1 = model.plot(forecast)
+plt.show()
+
+fig2 = model.plot_components(forecast)
+plt.show()
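
The script's comment mentions MSE and RMSE alongside MAE, but only MAE is computed. A minimal sketch of the remaining metrics, assuming (as the MAE call above does) that eval_data and forecast cover the same 15-minute timestamps row for row:

import numpy as np
from sklearn.metrics import mean_squared_error

# Assumes eval_data['y'] and forecast['yhat'] are positionally aligned, as in the MAE call above.
mse = mean_squared_error(eval_data['y'], forecast['yhat'])
rmse = np.sqrt(mse)
print(f"Mean Squared Error (MSE): {mse}")
print(f"Root Mean Squared Error (RMSE): {rmse}")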