From 8f4b724912fc70fd0fc8cdbf3abd5320b606ec44 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 17:22:48 +0100 Subject: [PATCH 01/63] Query TimedBelief rather than Power in api v1.3 tests Signed-off-by: F.N. Claessen --- flexmeasures/api/v1_3/tests/test_api_v1_3.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index bdf2c7abd..33f508610 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -12,9 +12,8 @@ message_for_get_device_message, message_for_post_udi_event, ) -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.scheduling import handle_scheduling_exception from flexmeasures.utils.calculations import integrate_time_series @@ -97,13 +96,13 @@ def test_post_udi_event_and_get_device_message( scheduler_source is not None ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == sensor.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values], freq=resolution), + [-v.event_value for v in power_values], + index=pd.DatetimeIndex([v.event_start for v in power_values], freq=resolution), ) # For consumption schedules, positive values denote consumption. 
For the db, consumption is negative assert ( len(consumption_schedule) From 61e2fd881981357ad25f772b5e614453a94ccbdc Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 17:50:51 +0100 Subject: [PATCH 02/63] Query TimedBelief rather than Power in api v1.3 implementations Signed-off-by: F.N. Claessen --- flexmeasures/api/v1_3/implementations.py | 49 ++++-------------------- 1 file changed, 8 insertions(+), 41 deletions(-) diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 2ecd638de..41a3d697c 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -8,7 +8,6 @@ import numpy as np import pandas as pd from rq.job import Job, NoSuchJobError -from sqlalchemy import and_, func from flexmeasures.utils.entity_address_utils import ( parse_entity_address, @@ -39,9 +38,9 @@ parse_isodate_str, ) from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.queries.utils import simplify_index from flexmeasures.data.services.resources import has_assets, can_access_asset from flexmeasures.data.services.scheduling import create_scheduling_job @@ -152,46 +151,14 @@ def get_device_message_response(generic_asset_name_groups, duration): message + f'no data is known from "{schedule_data_source_name}".' 
) - # todo: after moving the Asset's Power data to the corresponding Sensor's TimedBeliefs, - # the query below should be replaced by: - # sensor.search_beliefs( - # event_starts_after=schedule_start, - # event_ends_before=schedule_start + planning_horizon, - # source=scheduler_source, - # most_recent_beliefs_only=True, - # ) - - # Subquery to get the most recent schedule only - subq = ( - db.session.query( - Power.datetime, - Power.data_source_id, - func.min(Power.horizon).label("most_recent_belief_horizon"), - ) - .filter(Power.sensor_id == sensor_id) - .group_by(Power.datetime, Power.data_source_id) - .subquery() - ) - power_values = ( - Power.query.filter(Power.sensor_id == sensor_id) - .filter(Power.data_source_id == scheduler_source.id) - .filter(Power.datetime >= schedule_start) - .filter(Power.datetime < schedule_start + planning_horizon) - .order_by(Power.datetime.asc()) - .join( - subq, - and_( - Power.datetime == subq.c.datetime, - Power.data_source_id == subq.c.data_source_id, - Power.horizon == subq.c.most_recent_belief_horizon, - ), - ) - .all() + power_values = sensor.search_beliefs( + event_starts_after=schedule_start, + event_ends_before=schedule_start + planning_horizon, + source=scheduler_source, + most_recent_beliefs_only=True, ) - consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values]), - ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative + # For consumption schedules, positive values denote consumption. For the db, consumption is negative + consumption_schedule = -simplify_index(power_values)["event_value"] if consumption_schedule.empty: return unknown_schedule( message + "the schedule was not found in the database." From 07a80595f5eff5d519f18058bc919b96d8bcbf61 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Thu, 23 Dec 2021 18:48:56 +0100 Subject: [PATCH 03/63] Query TimedBelief rather than Power in user services tests Signed-off-by: F.N. Claessen --- flexmeasures/data/tests/test_user_services.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/tests/test_user_services.py b/flexmeasures/data/tests/test_user_services.py index 37e4a0bd1..a9f856c84 100644 --- a/flexmeasures/data/tests/test_user_services.py +++ b/flexmeasures/data/tests/test_user_services.py @@ -9,8 +9,9 @@ delete_user, InvalidFlexMeasuresUser, ) -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief def test_create_user( @@ -89,7 +90,9 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): ).all() asset_ids = [asset.id for asset in user_assets_with_measurements_before] for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() + num_power_measurements = TimedBelief.query.filter( + TimedBelief.sensor_id == asset_id + ).count() assert num_power_measurements == 96 delete_user(prosumer) assert find_user_by_email("test_prosumer_user@seita.nl") is None @@ -97,5 +100,7 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): assert len(user_assets_after) == 0 assert User.query.count() == num_users_before - 1 for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() + num_power_measurements = TimedBelief.query.filter( + TimedBelief.sensor_id == asset_id + ).count() assert num_power_measurements == 0 From 0b9466407ba83c49ddf39dc5cdd32797dc2d1a81 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 19:16:24 +0100 Subject: [PATCH 04/63] Query TimedBelief rather than Power in query tests Signed-off-by: F.N. 
Claessen --- flexmeasures/data/tests/test_queries.py | 32 +++++++++++++++---------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 0605fa3e0..e19fc36e6 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -6,7 +6,6 @@ import pytz import timely_beliefs as tb -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.queries.utils import ( @@ -41,9 +40,13 @@ ) def test_collect_power(db, app, query_start, query_end, num_values, setup_test_data): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - data = Power.query.filter(Power.sensor_id == wind_device_1.id).all() + data = TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id).all() print(data) - bdf: tb.BeliefsDataFrame = Power.search(wind_device_1.name, query_start, query_end) + bdf: tb.BeliefsDataFrame = TimedBelief.search( + wind_device_1.name, + event_starts_after=query_start, + event_ends_before=query_end, + ).convert_index_from_belief_time_to_horizon() print(bdf) assert ( bdf.index.names[0] == "event_start" @@ -52,8 +55,8 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d bdf.index.get_level_values("belief_horizon") ) # dtype of belief_horizon is timedelta64[ns], so the minimum horizon on an empty BeliefsDataFrame is NaT instead of NaN assert len(bdf) == num_values - for v1, v2 in zip(bdf.values, data): - assert abs(v1[0] - v2.value) < 10 ** -6 + for v1, v2 in zip(bdf["event_value"].tolist(), data): + assert abs(v1 - v2.event_value) < 10 ** -6 @pytest.mark.parametrize( @@ -85,13 +88,16 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d ), ], ) -def test_collect_power_resampled( +def 
test_collect_power_resampled( db, app, query_start, query_end, resolution, num_values, setup_test_data ): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.search( - wind_device_1.name, query_start, query_end, resolution=resolution - ) + bdf: tb.BeliefsDataFrame = TimedBelief.search( + wind_device_1.name, + event_starts_after=query_start, + event_ends_before=query_end, + resolution=resolution, + ).convert_index_from_belief_time_to_horizon() print(bdf) assert len(bdf) == num_values @@ -206,12 +212,12 @@ def test_multiplication_with_both_empty_dataframe(): def test_simplify_index(setup_test_data, check_empty_frame): """Check whether simplify_index retains the event resolution.""" wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.search( + bdf: tb.BeliefsDataFrame = TimedBelief.search( wind_device_1.name, - datetime(2015, 1, 1, tzinfo=pytz.utc), - datetime(2015, 1, 2, tzinfo=pytz.utc), + event_starts_after=datetime(2015, 1, 1, tzinfo=pytz.utc), + event_ends_before=datetime(2015, 1, 2, tzinfo=pytz.utc), resolution=timedelta(minutes=15), - ) + ).convert_index_from_belief_time_to_horizon() if check_empty_frame: # We empty the BeliefsDataFrame, which retains the metadata such as sensor and resolution bdf = bdf.iloc[0:0, :] From f3504bd4c82df03c544edd33e9cefc82d28f1e20 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 20:13:20 +0100 Subject: [PATCH 05/63] Query TimedBelief rather than Power in forecasting tests Signed-off-by: F.N. 
Claessen --- flexmeasures/data/services/forecasting.py | 2 +- .../data/tests/test_forecasting_jobs.py | 36 +++++++++---------- .../tests/test_forecasting_jobs_fresh_db.py | 28 +++++++-------- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 1502bada1..0dc111433 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -245,7 +245,7 @@ def make_rolling_viewpoint_forecasts( click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts))) ts_value_forecasts = [ - timed_value_type( + Power( use_legacy_kwargs=False, event_start=dt, belief_horizon=horizon, diff --git a/flexmeasures/data/tests/test_forecasting_jobs.py b/flexmeasures/data/tests/test_forecasting_jobs.py index bf007a459..d86ecd38a 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs.py +++ b/flexmeasures/data/tests/test_forecasting_jobs.py @@ -8,7 +8,7 @@ from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -18,7 +18,7 @@ def custom_model_params(): - """ little training as we have little data, turn off transformations until they let this test run (TODO) """ + """little training as we have little data, turn off transformations until they let this test run (TODO)""" return dict( training_and_testing_period=timedelta(hours=2), outcome_var_transformation=None, @@ -39,12 +39,12 @@ def check_aggregate(overall_expected: int, horizon: timedelta, sensor_id: int): """Check that the expected number of forecasts were made for the given horizon, and check that each forecast is a number.""" all_forecasts = ( - Power.query.filter(Power.sensor_id 
== sensor_id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor_id) + .filter(TimedBelief.belief_horizon == horizon) .all() ) assert len(all_forecasts) == overall_expected - assert all([not np.isnan(f.value) for f in all_forecasts]) + assert all([not np.isnan(f.event_value) for f in all_forecasts]) def test_forecasting_an_hour_of_wind(db, app, setup_test_data): @@ -74,11 +74,11 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): assert get_data_source() is not None forecasts = ( - Power.query.filter(Power.sensor_id == wind_device_1.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 7))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 8))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 7))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 8))) ) .all() ) @@ -91,12 +91,12 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ last_power_datetime = ( ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == timedelta(hours=0)) - .order_by(Power.datetime.desc()) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == timedelta(hours=0)) + .order_by(TimedBelief.event_start.desc()) ) .first() - .datetime + .event_start ) # datetime index of the last power value 11.45pm (Jan 1st) # makes 4 forecasts, 1 of which is for a new datetime index @@ -120,9 +120,9 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == horizon) - .filter(Power.datetime > last_power_datetime) + 
TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == horizon) + .filter(TimedBelief.event_start > last_power_datetime) .all() ) assert len(forecasts) == 1 @@ -188,7 +188,7 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): - """ This one (as well as the fallback) should fail as the horizon is invalid.""" + """This one (as well as the fallback) should fail as the horizon is invalid.""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( timed_value_type=Power, @@ -203,7 +203,7 @@ def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): def test_failed_unknown_model(app, clean_redis, setup_test_data): - """ This one should fail because we use a model search term which yields no model configurator.""" + """This one should fail because we use a model search term which yields no model configurator.""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() horizon = timedelta(hours=1) diff --git a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py index 27ff1df45..e7b1bd815 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py @@ -4,7 +4,7 @@ from sqlalchemy.orm import Query from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, handle_forecasting_exception, @@ -37,11 +37,11 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - 
Power.query.filter(Power.sensor_id == wind_device2.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == wind_device2.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 11))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 14))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 11))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 14))) ) .all() ) @@ -69,11 +69,11 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 13))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 15))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 13))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 15))) ) .all() ) @@ -127,17 +127,17 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( def make_query(the_horizon_hours: int) -> Query: the_horizon = timedelta(hours=the_horizon_hours) return ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == the_horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == the_horizon) .filter( ( - Power.datetime + TimedBelief.event_start >= as_server_time( datetime(2015, 1, 1, hour_start + the_horizon_hours) ) ) & ( - Power.datetime + TimedBelief.event_start < as_server_time( datetime(2015, 1, 1, hour_start + the_horizon_hours + 2) ) @@ -155,7 +155,7 @@ def make_query(the_horizon_hours: int) -> Query: existing_data = 
make_query(the_horizon_hours=0).all() for ed, fd in zip(existing_data, forecasts): - assert ed.value == fd.value + assert ed.event_value == fd.event_value # Now to check which models actually got to work. # We check which data sources do and do not exist by now: From d1968473a00833b3d4196a11798ea466c423ad9d Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 20:43:01 +0100 Subject: [PATCH 06/63] Query TimedBelief rather than Power in scheduling tests Signed-off-by: F.N. Claessen --- flexmeasures/data/tests/test_scheduling_jobs.py | 9 ++++----- .../data/tests/test_scheduling_jobs_fresh_db.py | 11 +++++------ 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/flexmeasures/data/tests/test_scheduling_jobs.py b/flexmeasures/data/tests/test_scheduling_jobs.py index 6ca2f1c02..55add185b 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs.py +++ b/flexmeasures/data/tests/test_scheduling_jobs.py @@ -2,8 +2,7 @@ from datetime import datetime, timedelta from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.utils.time_utils import as_server_time @@ -41,9 +40,9 @@ def test_scheduling_a_battery(db, app, add_battery_assets, setup_test_data): ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == battery.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == battery.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) - print([v.value for v in power_values]) + print([v.event_value for v in power_values]) assert len(power_values) == 96 diff --git 
a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py index 499760ed3..9b9efbddd 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py @@ -3,9 +3,8 @@ import numpy as np import pandas as pd -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.utils.time_utils import as_server_time @@ -63,13 +62,13 @@ def test_scheduling_a_charging_station( ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == charging_station.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == charging_station.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values]), + [-v.event_value for v in power_values], + index=pd.DatetimeIndex([v.event_start for v in power_values]), ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative assert len(consumption_schedule) == 96 print(consumption_schedule.head(12)) From 9a7f32e05f38e6b07e888e25659b2d4bb9f9c99b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 20:51:28 +0100 Subject: [PATCH 07/63] Query TimedBelief rather than Power in api v1 tests Signed-off-by: F.N. 
Claessen --- flexmeasures/api/v1/tests/test_api_v1.py | 18 ++++++++---------- flexmeasures/api/v1/tests/utils.py | 24 ++++++++++++++---------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/flexmeasures/api/v1/tests/test_api_v1.py b/flexmeasures/api/v1/tests/test_api_v1.py index 875a3f3d9..38e57baf3 100644 --- a/flexmeasures/api/v1/tests/test_api_v1.py +++ b/flexmeasures/api/v1/tests/test_api_v1.py @@ -199,35 +199,33 @@ def test_get_meter_data(db, app, client, message): [ pd.DataFrame.from_dict( dict( - value=[(100.0 + i) for i in range(6)], - datetime=[ + event_value=[(100.0 + i) for i in range(6)], + event_start=[ isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i) for i in range(6) ], - data_source_id=1, + source_id=1, ) ), pd.DataFrame.from_dict( dict( - value=[(1000.0 - 10 * i) for i in range(6)], - datetime=[ + event_value=[(1000.0 - 10 * i) for i in range(6)], + event_start=[ isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i) for i in range(6) ], - data_source_id=2, + source_id=2, ) ), ] ) if "source" in message: source_ids = validate_user_sources(message["source"]) - expected_values = expected_values[ - expected_values["data_source_id"].isin(source_ids) - ] + expected_values = expected_values[expected_values["source_id"].isin(source_ids)] expected_values = expected_values.set_index( - ["datetime", "data_source_id"] + ["event_start", "source_id"] ).sort_index() # check whether conftest.py did its job setting up the database with expected values diff --git a/flexmeasures/api/v1/tests/utils.py b/flexmeasures/api/v1/tests/utils.py index 10f87e529..a9ef31e91 100644 --- a/flexmeasures/api/v1/tests/utils.py +++ b/flexmeasures/api/v1/tests/utils.py @@ -7,8 +7,7 @@ import pandas as pd from flexmeasures.api.common.utils.validators import validate_user_sources -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from 
flexmeasures.data.models.time_series import Sensor, TimedBelief def message_for_get_meter_data( @@ -119,21 +118,26 @@ def verify_power_in_db( ) resolution = sensor.event_resolution query = ( - db.session.query(Power.datetime, Power.value, Power.data_source_id) - .filter((Power.datetime > start - resolution) & (Power.datetime < end)) - .filter(Power.horizon == horizon) + db.session.query( + TimedBelief.event_start, TimedBelief.event_value, TimedBelief.source_id + ) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) + ) + .filter(TimedBelief.belief_horizon == horizon) .join(Sensor) - .filter(Power.sensor_id == Sensor.id) + .filter(TimedBelief.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) if "source" in message: source_ids = validate_user_sources(message["source"]) - query = query.filter(Power.data_source_id.in_(source_ids)) + query = query.filter(TimedBelief.source_id.in_(source_ids)) df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) - df = df.set_index(["datetime", "data_source_id"]).sort_index() + df = df.set_index(["event_start", "source_id"]).sort_index() if swapped_sign: - df["value"] = -df["value"] + df["event_value"] = -df["event_value"] - assert df["value"].to_list() == expected_df["value"].to_list() + assert df["event_value"].to_list() == expected_df["event_value"].to_list() From 0651806a868d066a71206b3496895a2ac8724801 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 21:47:15 +0100 Subject: [PATCH 08/63] Simplify data deletion, like, by a lot Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_delete.py | 53 ++------ flexmeasures/data/scripts/data_gen.py | 179 +++----------------------- 2 files changed, 31 insertions(+), 201 deletions(-) diff --git a/flexmeasures/cli/data_delete.py b/flexmeasures/cli/data_delete.py index e66e79de3..690b470b8 100644 --- a/flexmeasures/cli/data_delete.py +++ b/flexmeasures/cli/data_delete.py @@ -117,20 +117,9 @@ def delete_user_and_data(email: str, force: bool): def confirm_deletion( structure: bool = False, data: bool = False, - asset_type: Optional[str] = None, is_by_id: bool = False, ): affected_classes = get_affected_classes(structure, data) - if data and asset_type: - if asset_type == "Asset": - affected_classes.remove(Price) - affected_classes.remove(Weather) - elif asset_type == "Market": - affected_classes.remove(Power) - affected_classes.remove(Weather) - elif asset_type == "WeatherSensor": - affected_classes.remove(Power) - affected_classes.remove(Price) prompt = "This deletes all %s entries from %s.\nDo you want to continue?" % ( " and ".join( ", ".join( @@ -168,32 +157,23 @@ def delete_structure(force): @fm_delete_data.command("measurements") @with_appcontext @click.option( - "--asset-type", - help="Depopulate (time series) data for a specific generic asset type only." - "Follow up with Asset, Market or WeatherSensor.", -) -@click.option( - "--asset-id", + "--sensor-id", type=int, - help="Delete (time series) data for a single asset only. Follow up with the asset's ID. " - "We still need --asset-type, as well, so we know where to look this ID up.", + help="Delete (time series) data for a single sensor only. Follow up with the sensor's ID.", ) @click.option( "--force/--no-force", default=False, help="Skip warning about consequences." ) def delete_measurements( force: bool, - asset_type: Optional[str] = None, - asset_id: Optional[int] = None, + sensor_id: Optional[int] = None, ): - """ Delete measurements (with horizon <= 0).""" + """Delete measurements (ex-post beliefs, i.e. 
with belief_horizon <= 0).""" if not force: - confirm_deletion( - data=True, asset_type=asset_type, is_by_id=asset_id is not None - ) + confirm_deletion(data=True, is_by_id=sensor_id is not None) from flexmeasures.data.scripts.data_gen import depopulate_measurements - depopulate_measurements(app.db, asset_type, asset_id) + depopulate_measurements(app.db, sensor_id) @fm_delete_data.command("prognoses") @@ -202,29 +182,20 @@ def delete_measurements( "--force/--no-force", default=False, help="Skip warning about consequences." ) @click.option( - "--asset-type", - help="Depopulate (time series) data for a specific generic asset type only. " - "Follow up with Asset, Market or WeatherSensor.", -) -@click.option( - "--asset-id", + "--sensor-id", type=int, - help="Depopulate (time series) data for a single asset only. Follow up with the asset's ID. " - "Use in combination with --asset-type, so we know where to look this name up.", + help="Delete (time series) data for a single sensor only. Follow up with the sensor's ID. ", ) def delete_prognoses( force: bool, - asset_type: Optional[str] = None, - asset_id: Optional[int] = None, + sensor_id: Optional[int] = None, ): - """Delete forecasts and schedules (forecasts > 0).""" + """Delete forecasts and schedules (ex-ante beliefs, i.e. 
with belief_horizon > 0).""" if not force: - confirm_deletion( - data=True, asset_type=asset_type, is_by_id=asset_id is not None - ) + confirm_deletion(data=True, is_by_id=sensor_id is not None) from flexmeasures.data.scripts.data_gen import depopulate_prognoses - depopulate_prognoses(app.db, asset_type, asset_id) + depopulate_prognoses(app.db, sensor_id) app.cli.add_command(fm_delete_data) diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index a38cbed2a..ce52d1119 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -411,186 +411,45 @@ def depopulate_structure(db: SQLAlchemy): @as_transaction def depopulate_measurements( db: SQLAlchemy, - old_sensor_class_name: Optional[str] = None, - old_sensor_id: Optional[id] = None, + sensor_id: Optional[id] = None, ): - click.echo("Depopulating (time series) data from the database %s ..." % db.engine) - num_prices_deleted = 0 - num_power_measurements_deleted = 0 - num_weather_measurements_deleted = 0 - - # TODO: simplify this when sensors moved to one unified table - - if old_sensor_id is None: - if old_sensor_class_name is None or old_sensor_class_name == "Market": - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon <= timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "Asset": - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon <= timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon <= timedelta(hours=0)) - .delete() - ) - else: - if old_sensor_class_name is None: - click.echo( - "If you specify --asset-name, please also specify --asset-type, so we can look it up." 
- ) - return - if old_sensor_class_name == "Market": - market = ( - db.session.query(Market) - .filter(Market.id == old_sensor_id) - .one_or_none() - ) - if market is not None: - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon <= timedelta(hours=0)) - .filter(Price.sensor_id == market.id) - .delete() - ) - else: - num_prices_deleted = 0 + click.echo("Deleting (time series) data from the database %s ..." % db.engine) - elif old_sensor_class_name == "Asset": - asset = ( - db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() - ) - if asset is not None: - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon <= timedelta(hours=0)) - .filter(Power.sensor_id == asset.id) - .delete() - ) - else: - num_power_measurements_deleted = 0 - - elif old_sensor_class_name == "WeatherSensor": - sensor = ( - db.session.query(WeatherSensor) - .filter(WeatherSensor.id == old_sensor_id) - .one_or_none() - ) - if sensor is not None: - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon <= timedelta(hours=0)) - .filter(Weather.sensor_id == sensor.id) - .delete() - ) - else: - num_weather_measurements_deleted = 0 + query = db.session.query(TimedBelief).filter( + TimedBelief.belief_horizon <= timedelta(hours=0) + ) + if sensor_id is not None: + query = query.filter(TimedBelief.sensor_id == sensor_id) + num_measurements_deleted = query.delete() - click.echo("Deleted %d Prices" % num_prices_deleted) - click.echo("Deleted %d Power Measurements" % num_power_measurements_deleted) - click.echo("Deleted %d Weather Measurements" % num_weather_measurements_deleted) + click.echo("Deleted %d measurements (ex-post beliefs)" % num_measurements_deleted) @as_transaction def depopulate_prognoses( db: SQLAlchemy, - old_sensor_class_name: Optional[str] = None, - old_sensor_id: Optional[id] = None, + sensor_id: Optional[id] = None, ): click.echo( - "Depopulating (time series) forecasts and 
schedules data from the database %s ..." + "Deleting (time series) forecasts and schedules data from the database %s ..." % db.engine ) - num_prices_deleted = 0 - num_power_measurements_deleted = 0 - num_weather_measurements_deleted = 0 # Clear all jobs num_forecasting_jobs_deleted = app.queues["forecasting"].empty() num_scheduling_jobs_deleted = app.queues["scheduling"].empty() # Clear all forecasts (data with positive horizon) - if old_sensor_id is None: - if old_sensor_class_name is None or old_sensor_class_name == "Market": - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon > timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "Asset": - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon > timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon > timedelta(hours=0)) - .delete() - ) - else: - click.echo( - "Depopulating (time series) forecasts and schedules for %s from the database %s ..." 
- % (old_sensor_id, db.engine) - ) - - if old_sensor_class_name == "Market": - market = ( - db.session.query(Market) - .filter(Market.id == old_sensor_id) - .one_or_none() - ) - if market is not None: - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon > timedelta(hours=0)) - .filter(Price.sensor_id == market.id) - .delete() - ) - else: - num_prices_deleted = 0 - - if old_sensor_class_name == "Asset": - asset = ( - db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() - ) - if asset is not None: - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon > timedelta(hours=0)) - .filter(Power.sensor_id == asset.id) - .delete() - ) - else: - num_power_measurements_deleted = 0 + query = db.session.query(TimedBelief).filter( + TimedBelief.belief_horizon > timedelta(hours=0) + ) + if sensor_id is not None: + query = query.filter(TimedBelief.sensor_id == sensor_id) + num_forecasts_deleted = query.delete() - if old_sensor_class_name == "WeatherSensor": - sensor = ( - db.session.query(WeatherSensor) - .filter(WeatherSensor.id == old_sensor_id) - .one_or_none() - ) - if sensor is not None: - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon > timedelta(hours=0)) - .filter(Weather.sensor_id == sensor.id) - .delete() - ) - else: - num_weather_measurements_deleted = 0 click.echo("Deleted %d Forecast Jobs" % num_forecasting_jobs_deleted) click.echo("Deleted %d Schedule Jobs" % num_scheduling_jobs_deleted) - click.echo("Deleted %d Price Forecasts" % num_prices_deleted) - click.echo("Deleted %d Power Forecasts" % num_power_measurements_deleted) - click.echo("Deleted %d Weather Forecasts" % num_weather_measurements_deleted) + click.echo("Deleted %d forecasts (ex-ante beliefs)" % num_forecasts_deleted) def reset_db(db: SQLAlchemy): @@ -706,5 +565,5 @@ def get_affected_classes(structure: bool = True, data: bool = False) -> List: DataSource, ] if data: - affected_classes += 
[TimedBelief, Power, Price, Weather] + affected_classes += [TimedBelief] return affected_classes From 923406e14979d27a771c224f67d95d277401918b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 21:51:09 +0100 Subject: [PATCH 09/63] Count ex-ante TimedBeliefs after populating time series forecasts Signed-off-by: F.N. Claessen --- flexmeasures/data/scripts/data_gen.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index ce52d1119..fe478538f 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -364,16 +364,10 @@ def populate_time_series_forecasts( # noqa: C901 db.session.add(belief) click.echo( - "DB now has %d Power Forecasts" - % db.session.query(Power).filter(Power.horizon > timedelta(hours=0)).count() - ) - click.echo( - "DB now has %d Price Forecasts" - % db.session.query(Price).filter(Price.horizon > timedelta(hours=0)).count() - ) - click.echo( - "DB now has %d Weather Forecasts" - % db.session.query(Weather).filter(Weather.horizon > timedelta(hours=0)).count() + "DB now has %d forecasts" + % db.session.query(TimedBelief) + .filter(TimedBelief.belief_horizon > timedelta(hours=0)) + .count() ) From acfbc8a3c91b9e34344c21faa3fb8139113cbbc4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 21:55:33 +0100 Subject: [PATCH 10/63] Query TimedBelief rather than Price in api v1_1 tests Signed-off-by: F.N. 
Claessen --- flexmeasures/api/v1_1/tests/utils.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/flexmeasures/api/v1_1/tests/utils.py b/flexmeasures/api/v1_1/tests/utils.py index 6e0efe36f..d0083a76d 100644 --- a/flexmeasures/api/v1_1/tests/utils.py +++ b/flexmeasures/api/v1_1/tests/utils.py @@ -9,8 +9,7 @@ from flask import current_app from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief def message_for_get_prognosis( @@ -157,19 +156,25 @@ def verify_prices_in_db(post_message, values, db, swapped_sign: bool = False): sensor = SensorField("market", "fm0").deserialize(post_message["market"]) resolution = sensor.event_resolution query = ( - db.session.query(Price.value, Price.horizon) - .filter((Price.datetime > start - resolution) & (Price.datetime < end)) - .filter(Price.horizon == horizon - (end - (Price.datetime + resolution))) + db.session.query(TimedBelief.event_value, TimedBelief.belief_horizon) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) + ) + .filter( + TimedBelief.belief_horizon + == horizon - (end - (TimedBelief.event_start + resolution)) + ) .join(Sensor) - .filter(Price.sensor_id == Sensor.id) + .filter(TimedBelief.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) if swapped_sign: - df["value"] = -df["value"] - assert df.value.tolist() == values + df["event_value"] = -df["event_value"] + assert df["event_value"].tolist() == values def get_forecasting_jobs(timed_value_type: str) -> List[Job]: From d8929957674a13e66c9f84ee80afe58ac8b27f59 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Thu, 23 Dec 2021 22:00:56 +0100 Subject: [PATCH 11/63] Query TimedBelief rather than Power/Price/Weather in Resource.load_sensor_data Signed-off-by: F.N. Claessen --- flexmeasures/data/services/resources.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index d4a7f8247..28b9459cd 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -31,7 +31,7 @@ assets_share_location, ) from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.weather import Weather, WeatherSensorType from flexmeasures.data.models.user import User from flexmeasures.data.queries.utils import simplify_index @@ -484,8 +484,8 @@ def load_sensor_data( ) # Query the sensors - resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.search( - old_sensor_names=list(names_of_resource_sensors), + resource_data: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( + list(names_of_resource_sensors), event_starts_after=start, event_ends_before=end, horizons_at_least=belief_horizon_window[0], From 70bed393af25593949bf9eb9deade745705d809c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 22:10:17 +0100 Subject: [PATCH 12/63] Query TimedBelief rather than Power/Price/Weather in api v2.0 tests Signed-off-by: F.N. 
Claessen --- flexmeasures/api/v2_0/tests/utils.py | 45 ++++++++++------------------ 1 file changed, 15 insertions(+), 30 deletions(-) diff --git a/flexmeasures/api/v2_0/tests/utils.py b/flexmeasures/api/v2_0/tests/utils.py index b0608d509..621ffd2e4 100644 --- a/flexmeasures/api/v2_0/tests/utils.py +++ b/flexmeasures/api/v2_0/tests/utils.py @@ -6,10 +6,7 @@ import timely_beliefs as tb from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.models.weather import Weather from flexmeasures.data.services.users import find_user_by_email from flexmeasures.api.v1_1.tests.utils import ( message_for_post_price_data as v1_1_message_for_post_price_data, @@ -78,17 +75,6 @@ def verify_sensor_data_in_db( swapped_sign: bool = False, ): """util method to verify that sensor data ended up in the database""" - if entity_type == "sensor": - data_type = TimedBelief - elif entity_type == "connection": - data_type = Power - elif entity_type == "market": - data_type = Price - elif entity_type == "weather_sensor": - data_type = Weather - else: - raise ValueError("Unknown entity type") - start = parse_datetime(post_message["start"]) end = start + parse_duration(post_message["duration"]) sensor: Sensor = SensorField(entity_type, fm_scheme).deserialize( @@ -98,39 +84,38 @@ def verify_sensor_data_in_db( if "horizon" in post_message: horizon = parse_duration(post_message["horizon"]) query = ( - db.session.query(data_type.datetime, data_type.value, data_type.horizon) + db.session.query( + TimedBelief.event_start, + TimedBelief.event_value, + TimedBelief.belief_horizon, + ) .filter( - (data_type.datetime > start - resolution) & (data_type.datetime < end) + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) ) - .filter(data_type.horizon == horizon) + 
.filter(TimedBelief.belief_horizon == horizon) .join(Sensor) .filter(Sensor.name == sensor.name) ) else: query = ( db.session.query( - data_type.datetime, - data_type.value, - data_type.horizon, + TimedBelief.event_start, + TimedBelief.event_value, + TimedBelief.belief_horizon, ) .filter( - (data_type.datetime > start - resolution) & (data_type.datetime < end) + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) ) - # .filter(data_type.horizon == (data_type.datetime + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function + # .filter(TimedBelief.belief_horizon == (TimedBelief.event_start + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function .join(Sensor) .filter(Sensor.name == sensor.name) ) - # todo: after basing Price on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly + # todo: after basing sensor data on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) - df = df.rename( - columns={ - "value": "event_value", - "datetime": "event_start", - "horizon": "belief_horizon", - } - ) bdf = tb.BeliefsDataFrame(df, sensor=sensor, source="Some source") if "prior" in post_message: prior = parse_datetime(post_message["prior"]) From 8289bba127b54a83e84c0f535c9c86465aeaa3b1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 22:15:23 +0100 Subject: [PATCH 13/63] Refactor: simplify duplicate query construction Signed-off-by: F.N. 
Claessen --- flexmeasures/api/v2_0/tests/utils.py | 44 ++++++++++------------------ 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/flexmeasures/api/v2_0/tests/utils.py b/flexmeasures/api/v2_0/tests/utils.py index 621ffd2e4..9d51bd748 100644 --- a/flexmeasures/api/v2_0/tests/utils.py +++ b/flexmeasures/api/v2_0/tests/utils.py @@ -81,37 +81,23 @@ def verify_sensor_data_in_db( post_message[entity_type] ) resolution = sensor.event_resolution - if "horizon" in post_message: - horizon = parse_duration(post_message["horizon"]) - query = ( - db.session.query( - TimedBelief.event_start, - TimedBelief.event_value, - TimedBelief.belief_horizon, - ) - .filter( - (TimedBelief.event_start > start - resolution) - & (TimedBelief.event_start < end) - ) - .filter(TimedBelief.belief_horizon == horizon) - .join(Sensor) - .filter(Sensor.name == sensor.name) + query = ( + db.session.query( + TimedBelief.event_start, + TimedBelief.event_value, + TimedBelief.belief_horizon, ) - else: - query = ( - db.session.query( - TimedBelief.event_start, - TimedBelief.event_value, - TimedBelief.belief_horizon, - ) - .filter( - (TimedBelief.event_start > start - resolution) - & (TimedBelief.event_start < end) - ) - # .filter(TimedBelief.belief_horizon == (TimedBelief.event_start + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function - .join(Sensor) - .filter(Sensor.name == sensor.name) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) ) + # .filter(TimedBelief.belief_horizon == (TimedBelief.event_start + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function + .join(Sensor) + .filter(Sensor.name == sensor.name) + ) + if "horizon" in post_message: + horizon = parse_duration(post_message["horizon"]) + query = query.filter(TimedBelief.belief_horizon == horizon) # todo: after basing sensor data on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly df 
= pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] From 78ab21d0241a9a0edbe584f8bf6232786ab28b7b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 24 Dec 2021 16:03:33 +0100 Subject: [PATCH 14/63] Add custom join target to get rid of SA warning Signed-off-by: F.N. Claessen --- flexmeasures/data/models/time_series.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 83ae79641..0d49ba538 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -410,6 +410,7 @@ def search( source_criteria = get_source_criteria( cls, user_source_ids, source_types, exclude_source_types ) + custom_join_targets = [] if parsed_sources else [DataSource] bdf_dict = {} for sensor in sensors: @@ -426,6 +427,7 @@ def search( most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, custom_filter_criteria=source_criteria, + custom_join_targets=custom_join_targets, ) if resolution is not None: bdf = bdf.resample_events( From 109d5476a46144680db393cfa5ed579b007d7c57 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 26 Dec 2021 15:07:32 +0100 Subject: [PATCH 15/63] Filter criteria should work for both TimedBeliefs and TimedValues Signed-off-by: F.N. 
Claessen --- flexmeasures/data/queries/utils.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/queries/utils.py b/flexmeasures/data/queries/utils.py index f1382c0bd..282fac9d4 100644 --- a/flexmeasures/data/queries/utils.py +++ b/flexmeasures/data/queries/utils.py @@ -38,7 +38,7 @@ def create_beliefs_query( def get_source_criteria( - cls: "Type[ts.TimedValue]", + cls: "Type[ts.TimedValue, ts.TimedBelief]", user_source_ids: Union[int, List[int]], source_types: List[str], exclude_source_types: List[str], @@ -58,7 +58,7 @@ def get_source_criteria( def user_source_criterion( - cls: "Type[ts.TimedValue]", + cls: "Type[ts.TimedValue, ts.TimedBelief]", user_source_ids: Union[int, List[int]], ) -> BinaryExpression: """Criterion to search only through user data from the specified user sources. @@ -79,7 +79,11 @@ def user_source_criterion( ignorable_user_source_ids = [ user_source.id for user_source in ignorable_user_sources ] - return cls.data_source_id.not_in(ignorable_user_source_ids) + + # todo: [legacy] deprecate this if-statement, which is used to support the TimedValue class + if hasattr(cls, "data_source_id"): + return cls.data_source_id.not_in(ignorable_user_source_ids) + return cls.source_id.not_in(ignorable_user_source_ids) def source_type_criterion(source_types: List[str]) -> BinaryExpression: From ff45672c9a72d01fbabe4e0a72e7d67647426634 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 26 Dec 2021 15:08:22 +0100 Subject: [PATCH 16/63] Clarify docstring Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/utils/validators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/api/common/utils/validators.py b/flexmeasures/api/common/utils/validators.py index 25bfb825a..a45ff673c 100644 --- a/flexmeasures/api/common/utils/validators.py +++ b/flexmeasures/api/common/utils/validators.py @@ -271,7 +271,7 @@ def get_meter_data(user_source_ids): } The source ids then include the user's own id, - and ids of other users that are registered as a Prosumer and/or Energy Service Company. + and ids of other users whose organisation account is registered as a Prosumer and/or Energy Service Company. """ def wrapper(fn): From 47ed019a5105436cd385aa5a5050e32063ba8dda Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 26 Dec 2021 16:13:18 +0100 Subject: [PATCH 17/63] Query TimedBelief rather than Power in api v1 implementations Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/implementations.py | 8 +++++--- flexmeasures/data/models/time_series.py | 12 +++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 1bbc3de14..dca873487 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -13,7 +13,7 @@ ) from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.api.common.responses import ( @@ -199,8 +199,8 @@ def collect_connection_and_value_groups( # Get the power values # TODO: fill NaN for non-existing values - power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.search( - old_sensor_names=sensor_names, + power_bdf_dict: 
Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( + sensor_names, event_starts_after=start, event_ends_before=end, resolution=resolution, @@ -210,6 +210,8 @@ def collect_connection_and_value_groups( beliefs_before=belief_time_window[1], user_source_ids=user_source_ids, source_types=source_types, + most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, sum_multiple=False, ) # Todo: parse time window of power_bdf_dict, which will be different for requests that are not of the form: diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 0d49ba538..32bb58cd3 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -7,6 +7,7 @@ from sqlalchemy.ext.mutable import MutableDict from sqlalchemy.orm import Query, Session import timely_beliefs as tb +from timely_beliefs.beliefs.probabilistic_utils import get_median_belief import timely_beliefs.utils as tb_utils from flexmeasures.auth.policy import AuthModelMixin @@ -31,7 +32,7 @@ class Sensor(db.Model, tb.SensorDBMixin, AuthModelMixin): - """A sensor measures events. 
""" + """A sensor measures events.""" attributes = db.Column(MutableDict.as_mutable(db.JSON), nullable=False, default={}) @@ -353,6 +354,7 @@ def search( most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, most_recent_only: bool = False, # deprecated + one_deterministic_belief_per_event: bool = False, resolution: Union[str, timedelta] = None, sum_multiple: bool = True, ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: @@ -429,6 +431,14 @@ def search( custom_filter_criteria=source_criteria, custom_join_targets=custom_join_targets, ) + if one_deterministic_belief_per_event: + # todo: compute median of collective belief instead of median of first belief (update expected test results accordingly) + # todo: move to timely-beliefs: select mean/median belief + bdf = ( + bdf.for_each_belief(get_median_belief) + .groupby(level=["event_start", "belief_time"]) + .apply(lambda x: x.head(1)) + ) if resolution is not None: bdf = bdf.resample_events( resolution, keep_only_most_recent_belief=most_recent_beliefs_only From 4eb0856b33fb81f59ad5f0ad42ff55c3f62efc14 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 23 Dec 2021 22:15:23 +0100 Subject: [PATCH 18/63] Schedules should contain one deterministic belief per event Signed-off-by: F.N. Claessen --- flexmeasures/api/v1_3/implementations.py | 1 + flexmeasures/data/models/time_series.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 41a3d697c..53fe333cf 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -156,6 +156,7 @@ def get_device_message_response(generic_asset_name_groups, duration): event_ends_before=schedule_start + planning_horizon, source=scheduler_source, most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, ) # For consumption schedules, positive values denote consumption. 
For the db, consumption is negative consumption_schedule = -simplify_index(power_values)["event_value"] diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 32bb58cd3..bbd009504 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -182,6 +182,7 @@ def search_beliefs( most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, most_recent_only: bool = False, # deprecated + one_deterministic_belief_per_event: bool = False, as_json: bool = False, ) -> Union[tb.BeliefsDataFrame, str]: """Search all beliefs about events for this sensor. @@ -195,6 +196,7 @@ def search_beliefs( :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + :param one_deterministic_belief_per_event: only return a single value per event (no probabilistic distribution) :param as_json: return beliefs in JSON format (e.g. 
for use in charts) rather than as BeliefsDataFrame :returns: BeliefsDataFrame or JSON string (if as_json is True) """ @@ -217,6 +219,7 @@ def search_beliefs( source=source, most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, + one_deterministic_belief_per_event=one_deterministic_belief_per_event, ) if as_json: df = bdf.reset_index() @@ -373,6 +376,7 @@ def search( :param exclude_source_types: Optional list of source type names to exclude specific source types * :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + :param one_deterministic_belief_per_event: only return a single value per event (no probabilistic distribution) :param resolution: Optional timedelta or pandas freqstr used to resample the results ** :param sum_multiple: if True, sum over multiple sensors; otherwise, return a dictionary with sensor names as key, each holding a BeliefsDataFrame as its value From 45f5bb54ea1a8d05ee20574295b090d795ee40e0 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 10:16:38 +0100 Subject: [PATCH 19/63] Fix type annotation Signed-off-by: F.N. 
Claessen --- flexmeasures/data/queries/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/data/queries/utils.py b/flexmeasures/data/queries/utils.py index 282fac9d4..98de32b35 100644 --- a/flexmeasures/data/queries/utils.py +++ b/flexmeasures/data/queries/utils.py @@ -38,7 +38,7 @@ def create_beliefs_query( def get_source_criteria( - cls: "Type[ts.TimedValue, ts.TimedBelief]", + cls: "Union[Type[ts.TimedValue], Type[ts.TimedBelief]]", user_source_ids: Union[int, List[int]], source_types: List[str], exclude_source_types: List[str], @@ -58,7 +58,7 @@ def get_source_criteria( def user_source_criterion( - cls: "Type[ts.TimedValue, ts.TimedBelief]", + cls: "Union[Type[ts.TimedValue], Type[ts.TimedBelief]]", user_source_ids: Union[int, List[int]], ) -> BinaryExpression: """Criterion to search only through user data from the specified user sources. From 28041c63fa773ccbf5ca04f516e8a2b254568eac Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 10:18:28 +0100 Subject: [PATCH 20/63] flake8 Signed-off-by: F.N. Claessen --- flexmeasures/cli/data_delete.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/flexmeasures/cli/data_delete.py b/flexmeasures/cli/data_delete.py index 690b470b8..005c22a07 100644 --- a/flexmeasures/cli/data_delete.py +++ b/flexmeasures/cli/data_delete.py @@ -6,10 +6,7 @@ from flexmeasures.data import db from flexmeasures.data.models.user import Account, AccountRole, RolesAccounts, User -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.generic_assets import GenericAsset -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather from flexmeasures.data.scripts.data_gen import get_affected_classes from flexmeasures.data.services.users import find_user_by_email, delete_user From 613f59121056503bcdf31cc041bf035f01577d3a Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 27 Dec 2021 10:42:21 +0100 Subject: [PATCH 21/63] Query TimedBelief rather than Price/Weather for analytics Signed-off-by: F.N. Claessen --- flexmeasures/data/queries/analytics.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index f61dbfd9c..922290097 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -13,9 +13,8 @@ from flexmeasures.utils import calculations, time_utils from flexmeasures.data.services.resources import Resource, find_closest_sensor from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensorType +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.models.weather import WeatherSensorType def get_power_data( @@ -176,7 +175,7 @@ def get_prices_data( market_name = "" if market_sensor is None else market_sensor.name # Get price data - price_bdf: tb.BeliefsDataFrame = Price.search( + price_bdf: tb.BeliefsDataFrame = TimedBelief.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -194,7 +193,7 @@ def get_prices_data( metrics["realised_unit_price"] = np.NaN # Get price forecast - price_forecast_bdf: tb.BeliefsDataFrame = Price.search( + price_forecast_bdf: tb.BeliefsDataFrame = TimedBelief.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -262,7 +261,7 @@ def get_weather_data( sensor_names = [sensor.name for sensor in closest_sensors] # Get weather data - weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( + weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( sensor_names, event_starts_after=query_window[0], 
event_ends_before=query_window[1], @@ -279,7 +278,9 @@ def get_weather_data( ) # Get weather forecasts - weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( + weather_forecast_bdf_dict: Dict[ + str, tb.BeliefsDataFrame + ] = TimedBelief.search( sensor_names, event_starts_after=query_window[0], event_ends_before=query_window[1], From 3ca9a8f3e883d111d8105dd179f811e3b3696e41 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 10:43:03 +0100 Subject: [PATCH 22/63] Query deterministic TimedBelief rather than Price for planning queries Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index 62186c747..8cdf26394 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -7,8 +7,7 @@ import numpy as np import timely_beliefs as tb -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.planning.exceptions import ( UnknownMarketException, UnknownPricesException, @@ -82,11 +81,13 @@ def get_prices( # Look for the applicable market sensor sensor = get_market(sensor) - price_bdf: tb.BeliefsDataFrame = Price.search( + price_bdf: tb.BeliefsDataFrame = TimedBelief.search( sensor.name, event_starts_after=query_window[0], event_ends_before=query_window[1], resolution=to_offset(resolution).freqstr, + most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, ) price_df = simplify_index(price_bdf) nan_prices = price_df.isnull().values From 27dc9efa5ae38f5aa913f02f503360845455e530 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 27 Dec 2021 11:58:08 +0100 Subject: [PATCH 23/63] Forecast TimedBelief rather than Power/Price/Weather Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/implementations.py | 1 - flexmeasures/api/v1_1/implementations.py | 2 - flexmeasures/api/v1_1/tests/test_api_v1_1.py | 9 ++--- .../api/v1_1/tests/test_api_v1_1_fresh_db.py | 2 +- flexmeasures/api/v1_1/tests/utils.py | 11 +++--- .../api/v2_0/implementations/sensors.py | 3 -- .../tests/test_api_v2_0_sensors_fresh_db.py | 2 - flexmeasures/cli/data_add.py | 13 ------- flexmeasures/cli/testing.py | 2 - .../models/forecasting/model_spec_factory.py | 18 ++++----- flexmeasures/data/models/forecasting/utils.py | 10 ++--- flexmeasures/data/models/utils.py | 18 --------- flexmeasures/data/scripts/data_gen.py | 4 -- flexmeasures/data/services/forecasting.py | 38 ++++--------------- .../data/tests/test_forecasting_jobs.py | 6 --- .../tests/test_forecasting_jobs_fresh_db.py | 4 -- 16 files changed, 31 insertions(+), 112 deletions(-) delete mode 100644 flexmeasures/data/models/utils.py diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index dca873487..2d41991bc 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -319,7 +319,6 @@ def create_connection_and_value_groups( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index 016bcec45..c3c08c5bd 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -128,7 +128,6 @@ def post_price_data_response( if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = 
create_forecasting_jobs( - Price, sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, @@ -216,7 +215,6 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - Weather, sensor.id, start, start + duration, diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1.py b/flexmeasures/api/v1_1/tests/test_api_v1_1.py index 514058311..440b37abc 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1.py @@ -24,7 +24,6 @@ from flexmeasures.auth.error_handling import UNAUTH_ERROR_STATUS from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.user import User -from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor @@ -155,7 +154,6 @@ def test_post_price_data(setup_api_test_data, db, app, clean_redis, post_message for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id @@ -195,9 +193,9 @@ def test_post_weather_forecasts( ): """ Try to post wind speed and temperature forecasts as a logged-in test user with the Prosumer role, which should succeed. - As only forecasts are sent, no forecasting jobs are expected. + As only forecasts are sent, no additional forecasting jobs are expected. 
""" - assert len(get_forecasting_jobs("Weather")) == 0 + num_jobs_before = len(get_forecasting_jobs()) # post weather data auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") @@ -210,7 +208,8 @@ def test_post_weather_forecasts( assert post_weather_data_response.status_code == 200 assert post_weather_data_response.json["type"] == "PostWeatherDataResponse" - assert len(get_forecasting_jobs("Weather")) == 0 + num_jobs_after = len(get_forecasting_jobs()) + assert num_jobs_after == num_jobs_before @pytest.mark.parametrize( diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py b/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py index c99b04434..aee81bf6e 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py @@ -78,7 +78,7 @@ def test_post_weather_data( assert post_weather_data_response.json["type"] == "PostWeatherDataResponse" forecast_horizons = forecast_horizons_for(timedelta(minutes=5)) - jobs = get_forecasting_jobs("Weather") + jobs = get_forecasting_jobs(last_n=len(forecast_horizons)) for job, horizon in zip( sorted(jobs, key=lambda x: x.kwargs["horizon"]), forecast_horizons ): diff --git a/flexmeasures/api/v1_1/tests/utils.py b/flexmeasures/api/v1_1/tests/utils.py index d0083a76d..3d4bc0d49 100644 --- a/flexmeasures/api/v1_1/tests/utils.py +++ b/flexmeasures/api/v1_1/tests/utils.py @@ -177,9 +177,8 @@ def verify_prices_in_db(post_message, values, db, swapped_sign: bool = False): assert df["event_value"].tolist() == values -def get_forecasting_jobs(timed_value_type: str) -> List[Job]: - return [ - job - for job in current_app.queues["forecasting"].jobs - if job.kwargs["timed_value_type"] == timed_value_type - ] +def get_forecasting_jobs(last_n: Optional[int] = None) -> List[Job]: + """Get all or last n forecasting jobs.""" + if last_n: + return current_app.queues["forecasting"].jobs[-last_n:] + return current_app.queues["forecasting"].jobs diff --git 
a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index 0264da331..ee6ce40a7 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -117,7 +117,6 @@ def post_price_data_response( # noqa C901 if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( - Price, sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, @@ -208,7 +207,6 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset forecasting_jobs.extend( create_forecasting_jobs( - Weather, sensor.id, start, start + duration, @@ -373,7 +371,6 @@ def post_power_data( if create_forecasting_jobs_too: forecasting_jobs.extend( create_forecasting_jobs( - Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py index 2b9a24e68..0436693c9 100644 --- a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py +++ b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py @@ -10,7 +10,6 @@ message_for_post_price_data, verify_sensor_data_in_db, ) -from flexmeasures.data.models.markets import Price @pytest.mark.parametrize( @@ -60,5 +59,4 @@ def test_post_price_data_2_0( for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 7202abfa3..1ae5a0560 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -16,9 +16,6 @@ from 
flexmeasures.data import db from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.data.services.users import create_user -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.user import Account, AccountRole, RolesAccounts from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.schemas.sensors import SensorSchema @@ -609,21 +606,11 @@ def create_forecasts( event_resolution = None if as_job: - if asset_type == "Asset": - value_type = Power - elif asset_type == "Market": - value_type = Price - elif asset_type == "WeatherSensor": - value_type = Weather - else: - raise TypeError(f"Unknown asset_type {asset_type}") - for horizon in horizons: # Note that this time period refers to the period of events we are forecasting, while in create_forecasting_jobs # the time period refers to the period of belief_times, therefore we are subtracting the horizon. 
create_forecasting_jobs( old_sensor_id=asset_id, - timed_value_type=value_type, horizons=[horizon], start_of_roll=forecast_start - horizon, end_of_roll=forecast_end - horizon, diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index 00ba824ce..de5621788 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -55,7 +55,6 @@ def test_making_forecasts(): create_forecasting_jobs( old_sensor_id=sensor_id, - timed_value_type=Power, horizons=[timedelta(hours=6)], start_of_roll=as_server_time(datetime(2015, 4, 1)), end_of_roll=as_server_time(datetime(2015, 4, 3)), @@ -151,7 +150,6 @@ def test_generic_model( fallback_model_identifier, ) = linear_model_configurator( sensor=sensors[0], - time_series_class=TimedValueType, forecast_start=start, forecast_end=end, forecast_horizon=horizon, diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 8219477a9..74ec0b781 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Dict, List, Optional, Union from datetime import datetime, timedelta, tzinfo from pprint import pformat import logging @@ -19,10 +19,8 @@ ) import pandas as pd -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.models.forecasting.utils import ( create_lags, set_training_and_testing_dates, @@ -53,9 +51,9 @@ class TBSeriesSpecs(SeriesSpecs): def __init__( self, - time_series_class, search_params: dict, name: str, + time_series_class: 
Optional[type] = TimedBelief, search_fnc: str = "search", original_tz: Optional[tzinfo] = pytz.utc, # postgres stores naive datetimes feature_transformation: Optional[ReversibleTransformation] = None, @@ -115,7 +113,6 @@ def check_data(self, df: pd.DataFrame): def create_initial_model_specs( # noqa: C901 sensor: Sensor, - time_series_class: Type[Union[Power, Price, Weather]], forecast_start: datetime, # Start of forecast period forecast_end: datetime, # End of forecast period forecast_horizon: timedelta, # Duration between time of forecasting and end time of the event that is forecast @@ -126,6 +123,7 @@ def create_initial_model_specs( # noqa: C901 custom_model_params: Optional[ dict ] = None, # overwrite model params, most useful for tests or experiments + time_series_class: Optional[type] = TimedBelief, ) -> ModelSpecs: """ Generic model specs for all asset types (also for markets and weather sensors) and horizons. @@ -179,7 +177,7 @@ def create_initial_model_specs( # noqa: C901 name=sensor.generic_asset.generic_asset_type.name, time_series_class=time_series_class, search_params=dict( - old_sensor_names=[sensor.name], + sensors=sensor, event_starts_after=query_window[0], event_ends_before=query_window[1], horizons_at_least=None, @@ -295,9 +293,9 @@ def configure_regressors_for_nearest_weather_sensor( regressor_specs.append( TBSeriesSpecs( name=regressor_specs_name, - time_series_class=Weather, + time_series_class=TimedBelief, search_params=dict( - old_sensor_names=[closest_sensor.name], + sensors=closest_sensor, event_starts_after=query_window[0], event_ends_before=query_window[1], horizons_at_least=horizon, diff --git a/flexmeasures/data/models/forecasting/utils.py b/flexmeasures/data/models/forecasting/utils.py index e095ff01e..17273b3aa 100644 --- a/flexmeasures/data/models/forecasting/utils.py +++ b/flexmeasures/data/models/forecasting/utils.py @@ -21,14 +21,14 @@ def check_data_availability( q = 
old_time_series_data_model.query.join(old_sensor_model.__class__).filter( old_sensor_model.__class__.name == old_sensor_model.name ) - first_value = q.order_by(old_time_series_data_model.datetime.asc()).first() - last_value = q.order_by(old_time_series_data_model.datetime.desc()).first() + first_value = q.order_by(old_time_series_data_model.event_start.asc()).first() + last_value = q.order_by(old_time_series_data_model.event_start.desc()).first() if first_value is None: raise NotEnoughDataException( "No data available at all. Forecasting impossible." ) - first = as_server_time(first_value.datetime) - last = as_server_time(last_value.datetime) + first = as_server_time(first_value.event_start) + last = as_server_time(last_value.event_start) if query_window[0] < first: suggested_start = forecast_start + (first - query_window[0]) raise NotEnoughDataException( @@ -56,7 +56,7 @@ def create_lags( resolution: timedelta, use_periodicity: bool, ) -> List[timedelta]: - """ List the lags for this asset type, using horizon and resolution information.""" + """List the lags for this asset type, using horizon and resolution information.""" lags = [] # Include a zero lag in case of backwards forecasting diff --git a/flexmeasures/data/models/utils.py b/flexmeasures/data/models/utils.py deleted file mode 100644 index f45040931..000000000 --- a/flexmeasures/data/models/utils.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import Union, Type - -from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.weather import WeatherSensor, Weather - - -def determine_old_time_series_class_by_old_sensor( - old_sensor: Union[Asset, Market, WeatherSensor] -) -> Type[Union[Power, Price, Weather]]: - if isinstance(old_sensor, Asset): - return Power - elif isinstance(old_sensor, Market): - return Price - elif isinstance(old_sensor, WeatherSensor): - return Weather - else: - raise TypeError("Unknown old sensor 
type.") diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index fe478538f..4f70098af 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -27,7 +27,6 @@ from flexmeasures.data.models.user import User, Role, RolesUsers from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import NotEnoughDataException -from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor from flexmeasures.utils.time_utils import ensure_local_timezone from flexmeasures.data.transactional import as_transaction @@ -266,9 +265,6 @@ def populate_time_series_forecasts( # noqa: C901 default_model = lookup_model_specs_configurator() model_specs, model_identifier, model_fallback = default_model( sensor=old_sensor.corresponding_sensor, - time_series_class=determine_old_time_series_class_by_old_sensor( - old_sensor - ), forecast_start=forecast_start, forecast_end=forecast_end, forecast_horizon=horizon, diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 0dc111433..6e9e9e176 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -1,25 +1,23 @@ from datetime import datetime, timedelta -from typing import List, Type, Union +from typing import List from flask import current_app import click from rq import get_current_job from rq.job import Job -from sqlalchemy.exc import IntegrityError from timetomodel.forecasting import make_rolling_forecasts +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException -from 
flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.forecasting.utils import ( get_query_window, check_data_availability, ) -from flexmeasures.data.models.weather import Weather -from flexmeasures.data.utils import save_to_session, get_data_source +from flexmeasures.data.utils import get_data_source from flexmeasures.utils.time_utils import ( as_server_time, server_now, @@ -46,7 +44,6 @@ class MisconfiguredForecastingJobException(Exception): def create_forecasting_jobs( - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], old_sensor_id: int, start_of_roll: datetime, end_of_roll: datetime, @@ -101,7 +98,6 @@ def create_forecasting_jobs( make_rolling_viewpoint_forecasts, kwargs=dict( old_sensor_id=old_sensor_id, - timed_value_type=timed_value_type, horizon=horizon, start=start_of_roll + horizon, end=end_of_roll + horizon, @@ -124,7 +120,6 @@ def create_forecasting_jobs( def make_fixed_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -142,7 +137,6 @@ def make_fixed_viewpoint_forecasts( def make_rolling_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -159,8 +153,6 @@ def make_rolling_viewpoint_forecasts( ---------- :param old_sensor_id: int To identify which old sensor to forecast (note: old_sensor_id == sensor_id) - :param timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]] - This should go away after a refactoring - we now use it to create the DB entry for the forecasts :param horizon: timedelta duration between the end of each interval and the time at which the belief about that interval is formed :param start: datetime @@ -198,7 +190,6 @@ def make_rolling_viewpoint_forecasts( model_configurator = 
lookup_model_specs_configurator(model_search_term) model_specs, model_identifier, fallback_model_search_term = model_configurator( sensor=sensor, - time_series_class=timed_value_type, forecast_start=as_server_time(start), forecast_end=as_server_time(end), forecast_horizon=horizon, @@ -224,7 +215,7 @@ def make_rolling_viewpoint_forecasts( ) check_data_availability( sensor, - timed_value_type, + TimedBelief, start, end, query_window, @@ -245,8 +236,7 @@ def make_rolling_viewpoint_forecasts( click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts))) ts_value_forecasts = [ - Power( - use_legacy_kwargs=False, + TimedBelief( event_start=dt, belief_horizon=horizon, event_value=value, @@ -255,20 +245,8 @@ def make_rolling_viewpoint_forecasts( ) for dt, value in forecasts.items() ] - - try: - save_to_session(ts_value_forecasts) - except IntegrityError as e: - - current_app.logger.warning(e) - click.echo("Rolling back due to IntegrityError") - db.session.rollback() - - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - click.echo("Saving again, with overwrite=True") - save_to_session(ts_value_forecasts, overwrite=True) - - db.session.commit() + bdf = tb.BeliefsDataFrame(ts_value_forecasts) + save_to_db(bdf) return len(forecasts) diff --git a/flexmeasures/data/tests/test_forecasting_jobs.py b/flexmeasures/data/tests/test_forecasting_jobs.py index d86ecd38a..df839f913 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs.py +++ b/flexmeasures/data/tests/test_forecasting_jobs.py @@ -7,7 +7,6 @@ from rq.job import Job from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.forecasting import ( @@ -59,7 +58,6 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): # makes 4 forecasts horizon = timedelta(hours=1) job = 
create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 6)), end_of_roll=as_server_time(datetime(2015, 1, 1, 7)), horizons=[horizon], @@ -102,7 +100,6 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ # makes 4 forecasts, 1 of which is for a new datetime index horizon = timedelta(hours=6) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=last_power_datetime - horizon - timedelta(minutes=30), # start of data on which forecast is based (5.15pm) @@ -176,7 +173,6 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) (Power data is in 2015)""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2016, 1, 1, 20)), end_of_roll=as_server_time(datetime(2016, 1, 1, 22)), horizons=[timedelta(hours=1)], @@ -191,7 +187,6 @@ def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): """This one (as well as the fallback) should fail as the horizon is invalid.""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 21)), end_of_roll=as_server_time(datetime(2015, 1, 1, 23)), horizons=[timedelta(hours=18)], @@ -211,7 +206,6 @@ def test_failed_unknown_model(app, clean_redis, setup_test_data): cmp["training_and_testing_period"] = timedelta(days=365) create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], diff --git a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py index e7b1bd815..b21a8f959 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py +++ 
b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py @@ -3,7 +3,6 @@ import pytest from sqlalchemy.orm import Query -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -25,7 +24,6 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis # makes 12 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 10)), end_of_roll=as_server_time(datetime(2015, 1, 1, 13)), horizons=[horizon], @@ -58,7 +56,6 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) # makes 8 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], @@ -106,7 +103,6 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( # The failed test model (this failure enqueues a new job) create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)), end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)), horizons=[horizon], From 66e6da0e6d4043555830d464356d81b699742659 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 12:08:08 +0100 Subject: [PATCH 24/63] Schedule TimedBelief rather than Power Signed-off-by: F.N. 
Claessen --- flexmeasures/data/services/scheduling.py | 27 ++++++------------------ 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 10928f2ae..d80150a49 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -8,14 +8,14 @@ import pytz from rq import get_current_job from rq.job import Job -from sqlalchemy.exc import IntegrityError +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.utils import save_to_session, get_data_source +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.utils import get_data_source """ The life cycle of a scheduling job: @@ -145,8 +145,7 @@ def make_schedule( click.echo("Job %s made schedule." % rq_job.id) ts_value_schedule = [ - Power( - use_legacy_kwargs=False, + TimedBelief( event_start=dt, belief_horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), event_value=-value, @@ -155,20 +154,8 @@ def make_schedule( ) for dt, value in consumption_schedule.items() ] # For consumption schedules, positive values denote consumption. 
For the db, consumption is negative - - try: - save_to_session(ts_value_schedule) - except IntegrityError as e: - - current_app.logger.warning(e) - click.echo("Rolling back due to IntegrityError") - db.session.rollback() - - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - click.echo("Saving again, with overwrite=True") - save_to_session(ts_value_schedule, overwrite=True) - - db.session.commit() + bdf = tb.BeliefsDataFrame(ts_value_schedule) + save_to_db(bdf) return True From fb58ec7459dd69c571bee27cdce61e67c14617ae Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 12:14:35 +0100 Subject: [PATCH 25/63] Apparently, to initialize a TimedBelief is to save a TimedBelief, too Signed-off-by: F.N. Claessen --- flexmeasures/data/services/forecasting.py | 8 +------- flexmeasures/data/services/scheduling.py | 10 ++-------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 6e9e9e176..d689962ac 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -6,9 +6,7 @@ from rq import get_current_job from rq.job import Job from timetomodel.forecasting import make_rolling_forecasts -import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException @@ -235,7 +233,7 @@ def make_rolling_viewpoint_forecasts( ) click.echo("Job %s made %d forecasts." 
% (rq_job.id, len(forecasts))) - ts_value_forecasts = [ + for dt, value in forecasts.items(): TimedBelief( event_start=dt, belief_horizon=horizon, @@ -243,10 +241,6 @@ def make_rolling_viewpoint_forecasts( sensor=sensor, source=data_source, ) - for dt, value in forecasts.items() - ] - bdf = tb.BeliefsDataFrame(ts_value_forecasts) - save_to_db(bdf) return len(forecasts) diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index d80150a49..bed0975c9 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -8,9 +8,7 @@ import pytz from rq import get_current_job from rq.job import Job -import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station @@ -144,18 +142,14 @@ def make_schedule( ) click.echo("Job %s made schedule." % rq_job.id) - ts_value_schedule = [ + for dt, value in consumption_schedule.items(): TimedBelief( event_start=dt, belief_horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), event_value=-value, sensor=sensor, source=data_source, - ) - for dt, value in consumption_schedule.items() - ] # For consumption schedules, positive values denote consumption. For the db, consumption is negative - bdf = tb.BeliefsDataFrame(ts_value_schedule) - save_to_db(bdf) + ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative return True From b15a4452b57035e4f34385869a844c4ce381bc91 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 12:24:20 +0100 Subject: [PATCH 26/63] Create TimedBelief rather than Power/Price/Weather in data generation script Signed-off-by: F.N. 
Claessen --- flexmeasures/data/scripts/data_gen.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index 4f70098af..8aafccd96 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -20,10 +20,10 @@ import inflect from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.models.markets import MarketType, Market, Price -from flexmeasures.data.models.assets import AssetType, Asset, Power +from flexmeasures.data.models.markets import MarketType, Market +from flexmeasures.data.models.assets import AssetType, Asset from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather +from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor from flexmeasures.data.models.user import User, Role, RolesUsers from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import NotEnoughDataException @@ -157,11 +157,12 @@ def add_dummy_tou_market(db: SQLAlchemy): unit="EUR/MWh", ) db.session.add(market) - source = DataSource.query.filter(DataSource.name == "Seita").one_or_none() + source = DataSource.query.filter( + DataSource.name == "Seita", DataSource.type == "demo script" + ).one_or_none() for year in range(2015, 2025): db.session.add( - Price( - use_legacy_kwargs=False, + TimedBelief( event_value=50, event_start=datetime(year, 1, 1, tzinfo=pytz.utc), belief_horizon=timedelta(0), @@ -317,8 +318,7 @@ def populate_time_series_forecasts( # noqa: C901 beliefs = [] if isinstance(old_sensor, Asset): beliefs = [ - Power( - use_legacy_kwargs=False, + TimedBelief( event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), belief_horizon=horizon, event_value=value, @@ -329,8 +329,7 @@ def 
populate_time_series_forecasts( # noqa: C901 ] elif isinstance(old_sensor, Market): beliefs = [ - Price( - use_legacy_kwargs=False, + TimedBelief( event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), belief_horizon=horizon, event_value=value, @@ -341,8 +340,7 @@ def populate_time_series_forecasts( # noqa: C901 ] elif isinstance(old_sensor, WeatherSensor): beliefs = [ - Weather( - use_legacy_kwargs=False, + TimedBelief( event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), belief_horizon=horizon, event_value=value, From 6af5027f22410e29303f6341fabd1109410f77b2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 14:04:53 +0100 Subject: [PATCH 27/63] Bump timely-beliefs dependency Signed-off-by: F.N. Claessen --- requirements/app.in | 2 +- requirements/app.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/app.in b/requirements/app.in index 8b791b450..fa3961a06 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -32,7 +32,7 @@ netCDF4 siphon tables timetomodel>=0.7.1 -timely-beliefs>=1.8.0 +timely-beliefs>=1.9.0 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index 3b2eaa204..95836d9f3 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -354,7 +354,7 @@ tables==3.6.1 # via -r requirements/app.in threadpoolctl==3.0.0 # via scikit-learn -timely-beliefs==1.8.0 +timely-beliefs==1.9.0 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in From 809c6d0d0ab0f8be720f5959b3eedbc7b3cb89b1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 16:25:49 +0100 Subject: [PATCH 28/63] Fix latest state query Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/time_series.py | 1 + flexmeasures/ui/charts/latest_state.py | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index bbd009504..c62e89d4a 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -166,6 +166,7 @@ def latest_state( source=source, most_recent_beliefs_only=True, most_recent_events_only=True, + one_deterministic_belief_per_event=True, ) def search_beliefs( diff --git a/flexmeasures/ui/charts/latest_state.py b/flexmeasures/ui/charts/latest_state.py index 8422c5e57..1d34d5f83 100644 --- a/flexmeasures/ui/charts/latest_state.py +++ b/flexmeasures/ui/charts/latest_state.py @@ -34,14 +34,15 @@ def get_latest_power_as_plot(sensor: Sensor, small: bool = False) -> Tuple[str, latest_power = sensor.latest_state() if not latest_power.empty: - # TODO: Get first entry - latest_power_value = latest_power.event_value + latest_power_value = latest_power["event_value"].values[0] if current_app.config.get("FLEXMEASURES_MODE", "") == "demo": - latest_power_datetime = latest_power.belief_time.replace( - year=datetime.now().year + latest_power_datetime = ( + latest_power.event_ends[0] + .to_pydatetime() + .replace(year=datetime.now().year) ) else: - latest_power_datetime = latest_power.belief_time + latest_power_datetime = latest_power.event_ends[0].to_pydatetime() latest_measurement_time_str = localized_datetime_str( latest_power_datetime + sensor.event_resolution ) From 4c94a9723d676afca63ac2103f8fb53264ad6d4d Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Tue, 28 Dec 2021 11:25:47 +0100 Subject: [PATCH 29/63] Revert "Apparently, to initialize a TimedBelief is to save a TimedBelief, too" This reverts commit fb58ec7459dd69c571bee27cdce61e67c14617ae. 
--- flexmeasures/data/services/forecasting.py | 8 +++++++- flexmeasures/data/services/scheduling.py | 10 ++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index d689962ac..6e9e9e176 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -6,7 +6,9 @@ from rq import get_current_job from rq.job import Job from timetomodel.forecasting import make_rolling_forecasts +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException @@ -233,7 +235,7 @@ def make_rolling_viewpoint_forecasts( ) click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts))) - for dt, value in forecasts.items(): + ts_value_forecasts = [ TimedBelief( event_start=dt, belief_horizon=horizon, @@ -241,6 +243,10 @@ def make_rolling_viewpoint_forecasts( sensor=sensor, source=data_source, ) + for dt, value in forecasts.items() + ] + bdf = tb.BeliefsDataFrame(ts_value_forecasts) + save_to_db(bdf) return len(forecasts) diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index bed0975c9..d80150a49 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -8,7 +8,9 @@ import pytz from rq import get_current_job from rq.job import Job +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station @@ -142,14 +144,18 @@ def make_schedule( ) click.echo("Job %s made schedule." 
% rq_job.id) - for dt, value in consumption_schedule.items(): + ts_value_schedule = [ TimedBelief( event_start=dt, belief_horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), event_value=-value, sensor=sensor, source=data_source, - ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative + ) + for dt, value in consumption_schedule.items() + ] # For consumption schedules, positive values denote consumption. For the db, consumption is negative + bdf = tb.BeliefsDataFrame(ts_value_schedule) + save_to_db(bdf) return True From 9f618eec4426616a927d292587bc1d0ed0ad565f Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Tue, 28 Dec 2021 11:32:58 +0100 Subject: [PATCH 30/63] Prevent saving TimedBelief to session upon updating Sensor or Source Signed-off-by: F.N. Claessen --- flexmeasures/data/models/time_series.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index c62e89d4a..872e4aabc 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -325,8 +325,22 @@ class TimedBelief(db.Model, tb.TimedBeliefDBMixin): def source_id(cls): return db.Column(db.Integer, db.ForeignKey("data_source.id"), primary_key=True) - sensor = db.relationship("Sensor", backref=db.backref("beliefs", lazy=True)) - source = db.relationship("DataSource", backref=db.backref("beliefs", lazy=True)) + sensor = db.relationship( + "Sensor", + backref=db.backref( + "beliefs", + lazy=True, + cascade="merge", # i.e. no save-update + ), + ) + source = db.relationship( + "DataSource", + backref=db.backref( + "beliefs", + lazy=True, + cascade="merge", # i.e. no save-update + ), + ) def __init__( self, From fb5d3118dba68fdd3ad3b49abab465e191534a64 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 27 Dec 2021 16:38:39 +0100 Subject: [PATCH 31/63] Create only TimedBeliefs in conftests Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/tests/conftest.py | 9 ++++----- flexmeasures/api/v1_1/tests/conftest.py | 11 ++++------- flexmeasures/conftest.py | 13 +++++-------- flexmeasures/data/tests/conftest.py | 6 +++--- 4 files changed, 16 insertions(+), 23 deletions(-) diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 7803e7088..55dff55bf 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -7,6 +7,7 @@ from flask_security.utils import hash_password from flexmeasures.data.services.users import create_user +from flexmeasures.data.models.time_series import TimedBelief @pytest.fixture(scope="module", autouse=True) @@ -16,7 +17,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices """ print("Setting up data for API v1 tests on %s" % db.engine) - from flexmeasures.data.models.assets import Asset, AssetType, Power + from flexmeasures.data.models.assets import Asset, AssetType from flexmeasures.data.models.data_sources import DataSource # Create an anonymous user TODO: used for demo purposes, maybe "demo-user" would be a better name @@ -90,8 +91,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices ).one_or_none() meter_data = [] for i in range(6): - p_1 = Power( - use_legacy_kwargs=False, + p_1 = TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(0), @@ -99,8 +99,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_5.corresponding_sensor, source=user1_data_source, ) - p_2 = Power( - use_legacy_kwargs=False, + p_2 = TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=0), diff --git 
a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index a63d4eb51..e27ff6aa5 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -6,8 +6,8 @@ from flask_security import SQLAlchemySessionUserDatastore from flask_security.utils import hash_password -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief @pytest.fixture(scope="module") @@ -57,8 +57,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices data_source = DataSource.query.filter(DataSource.user == test_user).one_or_none() power_forecasts = [] for i in range(6): - p_1 = Power( - use_legacy_kwargs=False, + p_1 = TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -66,8 +65,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_1.corresponding_sensor, source=data_source, ) - p_2 = Power( - use_legacy_kwargs=False, + p_2 = TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -75,8 +73,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_2.corresponding_sensor, source=data_source, ) - p_3 = Power( - use_legacy_kwargs=False, + p_3 = TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 393a863c6..6366883d0 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -23,11 +23,11 @@ from flexmeasures.auth.policy import ADMIN_ROLE from flexmeasures.utils.time_utils import as_server_time from flexmeasures.data.services.users import create_user -from 
flexmeasures.data.models.assets import AssetType, Asset, Power +from flexmeasures.data.models.assets import AssetType, Asset from flexmeasures.data.models.generic_assets import GenericAssetType, GenericAsset from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.weather import WeatherSensor, WeatherSensorType -from flexmeasures.data.models.markets import Market, MarketType, Price +from flexmeasures.data.models.markets import Market, MarketType from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.user import User, Account, AccountRole @@ -336,8 +336,7 @@ def setup_assets( for x in range(len(time_slots)) ] for dt, val in zip(time_slots, values): - p = Power( - use_legacy_kwargs=False, + p = TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, @@ -402,8 +401,7 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources random() * (1 + np.sin(x * 2 * np.pi / 24)) for x in range(len(time_slots)) ] for dt, val in zip(time_slots, values): - p = Price( - use_legacy_kwargs=False, + p = TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, @@ -418,8 +416,7 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources ) values = [100] * 8 + [90] * 8 + [100] * 8 for dt, val in zip(time_slots, values): - p = Price( - use_legacy_kwargs=False, + p = TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index c60816729..126850e61 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -9,8 +9,9 @@ from flask_sqlalchemy import SQLAlchemy from statsmodels.api import OLS -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Asset from 
flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather from flexmeasures.data.models.forecasting import model_map from flexmeasures.data.models.forecasting.model_spec_factory import ( @@ -75,8 +76,7 @@ def setup_fresh_test_data( ) values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] for dt, val in zip(time_slots, values): - p = Power( - use_legacy_kwargs=False, + p = TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, From 23e42a1bc88291fb8fd55eac2eb5ab2c47d992da Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 27 Dec 2021 17:13:58 +0100 Subject: [PATCH 32/63] Use session.add_all calls instead of session.bulk_save_objects or individual session.add calls Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/tests/conftest.py | 16 +++++++++------- flexmeasures/api/v1_1/tests/conftest.py | 22 +++++++++++++--------- flexmeasures/conftest.py | 24 +++++++++++++++--------- flexmeasures/data/tests/conftest.py | 8 +++++--- 4 files changed, 42 insertions(+), 28 deletions(-) diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 55dff55bf..86880b58f 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -89,9 +89,8 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices user2_data_source = DataSource.query.filter( DataSource.user == test_user_2 ).one_or_none() - meter_data = [] - for i in range(6): - p_1 = TimedBelief( + user1_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(0), @@ -99,7 +98,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_5.corresponding_sensor, source=user1_data_source, ) - p_2 = 
TimedBelief( + for i in range(6) + ] + user2_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=0), @@ -107,9 +109,9 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_5.corresponding_sensor, source=user2_data_source, ) - meter_data.append(p_1) - meter_data.append(p_2) - db.session.bulk_save_objects(meter_data) + for i in range(6) + ] + db.session.add_all(user1_beliefs + user2_beliefs) print("Done setting up data for API v1 tests") diff --git a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index e27ff6aa5..aff57aa0a 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -55,9 +55,8 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices cs_2 = Asset.query.filter(Asset.name == "CS 2").one_or_none() cs_3 = Asset.query.filter(Asset.name == "CS 3").one_or_none() data_source = DataSource.query.filter(DataSource.user == test_user).one_or_none() - power_forecasts = [] - for i in range(6): - p_1 = TimedBelief( + cs1_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -65,7 +64,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_1.corresponding_sensor, source=data_source, ) - p_2 = TimedBelief( + for i in range(6) + ] + cs2_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -73,7 +75,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_2.corresponding_sensor, source=data_source, ) - p_3 = TimedBelief( + for i in range(6) + ] + cs3_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), 
belief_horizon=timedelta(hours=6), @@ -81,10 +86,9 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_3.corresponding_sensor, source=data_source, ) - power_forecasts.append(p_1) - power_forecasts.append(p_2) - power_forecasts.append(p_3) - db.session.bulk_save_objects(power_forecasts) + for i in range(6) + ] + db.session.add_all(cs1_beliefs + cs2_beliefs + cs3_beliefs) print("Done setting up data for API v1.1 tests") diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 6366883d0..28bbbf0e3 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -335,15 +335,17 @@ def setup_assets( random() * (1 + np.sin(x * 2 * np.pi / (4 * 24))) for x in range(len(time_slots)) ] - for dt, val in zip(time_slots, values): - p = TimedBelief( + beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, sensor=asset.corresponding_sensor, source=setup_sources["Seita"], ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(beliefs) return {asset.name: asset for asset in assets} @@ -400,30 +402,34 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources values = [ random() * (1 + np.sin(x * 2 * np.pi / 24)) for x in range(len(time_slots)) ] - for dt, val in zip(time_slots, values): - p = TimedBelief( + day1_beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, source=setup_sources["Seita"], sensor=setup_markets["epex_da"].corresponding_sensor, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(day1_beliefs) # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours) time_slots = pd.date_range( datetime(2015, 1, 2), datetime(2015, 1, 3), freq="1H", closed="left" ) values = [100] * 8 + [90] * 8 + [100] * 8 - for dt, val in zip(time_slots, values): - p = TimedBelief( + 
day2_beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, source=setup_sources["Seita"], sensor=setup_markets["epex_da"].corresponding_sensor, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(day2_beliefs) @pytest.fixture(scope="module") diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index 126850e61..a0fa76fc4 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -75,15 +75,17 @@ def setup_fresh_test_data( datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T" ) values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] - for dt, val in zip(time_slots, values): - p = TimedBelief( + beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, sensor=asset.corresponding_sensor, source=data_source, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(beliefs) add_test_weather_sensor_and_forecasts(fresh_db) From b942fa0244e4d3db9d3052f6391a886726216440 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Tue, 28 Dec 2021 14:09:21 +0100 Subject: [PATCH 33/63] API directly creates TimedBeliefs Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/responses.py | 18 ++- flexmeasures/api/common/utils/api_utils.py | 45 ++++++- flexmeasures/api/dev/sensor_data.py | 6 +- .../api/dev/tests/test_sensor_data.py | 2 +- flexmeasures/api/v1/implementations.py | 21 ++-- flexmeasures/api/v1_1/implementations.py | 32 ++--- .../api/v2_0/implementations/sensors.py | 112 ++++++++--------- flexmeasures/data/services/forecasting.py | 3 +- flexmeasures/data/services/scheduling.py | 3 +- .../data/tests/test_time_series_services.py | 2 +- flexmeasures/data/utils.py | 118 +++++++++++++++++- 11 files changed, 261 insertions(+), 101 deletions(-) diff --git a/flexmeasures/api/common/responses.py b/flexmeasures/api/common/responses.py index 46eab7caa..143512458 100644 --- a/flexmeasures/api/common/responses.py +++ b/flexmeasures/api/common/responses.py @@ -39,11 +39,25 @@ def deprecated_api_version(message: str) -> ResponseTuple: def already_received_and_successfully_processed(message: str) -> ResponseTuple: return ( dict( - results="Rejected", + results="PROCESSED", status="ALREADY_RECEIVED_AND_SUCCESSFULLY_PROCESSED", message=message, ), - 400, + 200, + ) + + +@BaseMessage( + "Some of the data represents a replacement, which is reserved for servers in play mode. Enable play mode or update the prior in your request." 
+) +def invalid_replacement(message: str) -> ResponseTuple: + return ( + dict( + results="Rejected", + status="INVALID_REPLACEMENT", + message=message, + ), + 403, ) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 89d7ac7ef..594026e1a 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -1,5 +1,4 @@ from timely_beliefs.beliefs.classes import BeliefsDataFrame -from flexmeasures.data.models.time_series import TimedBelief from typing import List, Sequence, Tuple, Union import copy from datetime import datetime, timedelta @@ -16,11 +15,12 @@ from flexmeasures.data.models.assets import Asset, Power from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.weather import WeatherSensor, Weather from flexmeasures.data.services.time_series import drop_unchanged_beliefs -from flexmeasures.data.utils import save_to_session +from flexmeasures.data.utils import save_to_session, save_to_db as modern_save_to_db from flexmeasures.api.common.responses import ( + invalid_replacement, unrecognized_sensor, ResponseTuple, request_processed, @@ -340,6 +340,35 @@ def get_sensor_by_generic_asset_type_and_location( return sensor +def enqueue_forecasting_jobs( + forecasting_jobs: List[Job] = None, +): + """Enqueue forecasting jobs. + + :param forecasting_jobs: list of forecasting Jobs for redis queues. 
+ """ + if forecasting_jobs is not None: + [current_app.queues["forecasting"].enqueue_job(job) for job in forecasting_jobs] + + +def save_and_enqueue( + data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], + forecasting_jobs: List[Job] = None, + save_changed_beliefs_only: bool = True, +) -> ResponseTuple: + statuses = modern_save_to_db( + data, save_changed_beliefs_only=save_changed_beliefs_only + ) + enqueue_forecasting_jobs(forecasting_jobs) + if not isinstance(statuses, list): + statuses = [statuses] + if all([status == "success" for status in statuses]): + return request_processed() + elif all([status[:7] == "success" for status in statuses]): + return already_received_and_successfully_processed() + return invalid_replacement() + + def save_to_db( timed_values: Union[BeliefsDataFrame, List[Union[Power, Price, Weather]]], forecasting_jobs: List[Job] = [], @@ -349,7 +378,7 @@ def save_to_db( Data can only be replaced on servers in play mode. - TODO: remove options for Power, Price and Weather if we only handle beliefs one day. + TODO: remove this legacy function in its entirety (announced v0.8.0) :param timed_values: BeliefsDataFrame or a list of Power, Price or Weather values to be saved :param forecasting_jobs: list of forecasting Jobs for redis queues. @@ -357,6 +386,14 @@ def save_to_db( :returns: ResponseTuple """ + import warnings + + warnings.warn( + "The method api.common.utils.api_utils.save_to_db is deprecated. 
Check out the following replacements:" + "- [recommended option] to store BeliefsDataFrames only, switch to data.utils.save_to_db" + "- to store BeliefsDataFrames and enqueue jobs, switch to api.common.utils.api_utils.save_and_enqueue" + ) + if isinstance(timed_values, BeliefsDataFrame): if save_changed_beliefs_only: diff --git a/flexmeasures/api/dev/sensor_data.py b/flexmeasures/api/dev/sensor_data.py index fbb70f44b..a8dd758f6 100644 --- a/flexmeasures/api/dev/sensor_data.py +++ b/flexmeasures/api/dev/sensor_data.py @@ -1,7 +1,7 @@ from webargs.flaskparser import use_args from flexmeasures.api.common.schemas.sensor_data import SensorDataSchema -from flexmeasures.api.common.utils.api_utils import save_to_db +from flexmeasures.api.common.utils.api_utils import save_and_enqueue @use_args( @@ -15,13 +15,13 @@ def post_data(sensor_data): to create and save the data structure. """ beliefs = SensorDataSchema.load_bdf(sensor_data) - response, code = save_to_db(beliefs) + response, code = save_and_enqueue(beliefs) response.update(type="PostSensorDataResponse") return response, code def get_data(): - """ GET from /sensorData""" + """GET from /sensorData""" # - use data.models.time_series.Sensor::search_beliefs() - might need to add a belief_horizon parameter # - create the serialize method on the schema, to turn the resulting BeliefsDataFrame # to the JSON the API should respond with. 
diff --git a/flexmeasures/api/dev/tests/test_sensor_data.py b/flexmeasures/api/dev/tests/test_sensor_data.py index 9138b966e..f05c11068 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data.py +++ b/flexmeasures/api/dev/tests/test_sensor_data.py @@ -77,5 +77,5 @@ def test_post_sensor_data_twice(client, setup_api_test_data): headers={"Authorization": auth_token}, ) print(response.json) - assert response.status_code == 400 + assert response.status_code == 200 assert "data has already been received" in response.json["message"] diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 2d41991bc..2c81fa3d6 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -11,7 +11,7 @@ parse_entity_address, EntityAddressException, ) -from flexmeasures.data.models.assets import Power +from flexmeasures.data import db from flexmeasures.data.models.data_sources import get_or_create_source from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.resources import get_sensors @@ -26,7 +26,7 @@ ) from flexmeasures.api.common.utils.api_utils import ( groups_to_dict, - save_to_db, + save_and_enqueue, ) from flexmeasures.api.common.utils.validators import ( type_accepted, @@ -253,7 +253,7 @@ def create_connection_and_value_groups( # noqa: C901 if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") user_sensor_ids = [sensor.id for sensor in user_sensors] - power_measurements = [] + power_df_per_connection = [] forecasting_jobs = [] for connection_group, value_group in zip(generic_asset_name_groups, value_groups): for connection in connection_group: @@ -293,7 +293,8 @@ def create_connection_and_value_groups( # noqa: C901 ) return power_value_too_big(extra_info) - # Create new Power objects + # Create a new BeliefsDataFrame + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: 
@@ -302,8 +303,7 @@ def create_connection_and_value_groups( # noqa: C901 h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - p = Power( - use_legacy_kwargs=False, + p = TimedBelief( event_start=dt, event_value=value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption @@ -311,7 +311,10 @@ def create_connection_and_value_groups( # noqa: C901 sensor=sensor, source=data_source, ) - power_measurements.append(p) + + assert p not in db.session + beliefs.append(p) + power_df_per_connection.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves if horizon <= timedelta( @@ -323,8 +326,8 @@ def create_connection_and_value_groups( # noqa: C901 start, start + duration, resolution=duration / len(value_group), - enqueue=False, + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(power_measurements, forecasting_jobs) + return save_and_enqueue(power_df_per_connection, forecasting_jobs) diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index c3c08c5bd..9415e6826 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -4,6 +4,7 @@ from flask import current_app from flask_json import as_json from flask_security import current_user +import timely_beliefs as tb from flexmeasures.utils.entity_address_utils import ( parse_entity_address, @@ -16,7 +17,7 @@ invalid_horizon, ) from flexmeasures.api.common.utils.api_utils import ( - save_to_db, + save_and_enqueue, ) from flexmeasures.api.common.utils.migration_utils import get_sensor_by_unique_name from flexmeasures.api.common.utils.validators import ( @@ -41,8 +42,7 @@ get_sensor_by_generic_asset_type_and_location, ) from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather +from 
flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import create_forecasting_jobs @@ -84,7 +84,7 @@ def post_price_data_response( current_app.logger.info("POSTING PRICE DATA") data_source = get_or_create_source(current_user) - prices = [] + price_df_per_market = [] forecasting_jobs = [] for market_group, value_group in zip(generic_asset_name_groups, value_groups): for market in market_group: @@ -105,6 +105,7 @@ def post_price_data_response( return invalid_unit("%s prices" % sensor.name, [sensor.unit]) # Create new Price objects + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: @@ -113,15 +114,15 @@ def post_price_data_response( h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - p = Price( - use_legacy_kwargs=False, + p = TimedBelief( event_start=dt, event_value=value, belief_horizon=h, sensor=sensor, source=data_source, ) - prices.append(p) + beliefs.append(p) + price_df_per_market.append(tb.BeliefsDataFrame(beliefs)) # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts, # by the way, due to things like day-ahead markets. 
@@ -133,10 +134,10 @@ def post_price_data_response( start + duration, resolution=duration / len(value_group), horizons=[timedelta(hours=24), timedelta(hours=48)], - enqueue=False, # will enqueue later, only if we successfully saved prices + enqueue=False, # will enqueue later, after saving data ) - return save_to_db(prices, forecasting_jobs) + return save_and_enqueue(price_df_per_market, forecasting_jobs) @type_accepted("PostWeatherDataRequest") @@ -160,7 +161,7 @@ def post_weather_data_response( # noqa: C901 current_app.logger.info("POSTING WEATHER DATA") data_source = get_or_create_source(current_user) - weather_measurements = [] + weather_df_per_sensor = [] forecasting_jobs = [] for sensor_group, value_group in zip(generic_asset_name_groups, value_groups): for sensor in sensor_group: @@ -189,6 +190,7 @@ def post_weather_data_response( # noqa: C901 return sensor # Create new Weather objects + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: @@ -197,15 +199,15 @@ def post_weather_data_response( # noqa: C901 h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - w = Weather( - use_legacy_kwargs=False, + w = TimedBelief( event_start=dt, event_value=value, belief_horizon=h, sensor=sensor, source=data_source, ) - weather_measurements.append(w) + beliefs.append(w) + weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play) if current_app.config.get( @@ -219,11 +221,11 @@ def post_weather_data_response( # noqa: C901 start, start + duration, resolution=duration / len(value_group), - enqueue=False, # will enqueue later, only if we successfully saved weather measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(weather_measurements, forecasting_jobs) + return save_and_enqueue(weather_df_per_sensor, forecasting_jobs) @type_accepted("GetPrognosisRequest") 
diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index ee6ce40a7..eec12b72d 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -3,6 +3,7 @@ from flask import current_app from flask_json import as_json from flask_security import current_user +import timely_beliefs as tb from flexmeasures.api.common.responses import ( invalid_domain, @@ -16,7 +17,7 @@ ) from flexmeasures.api.common.utils.api_utils import ( get_sensor_by_generic_asset_type_and_location, - save_to_db, + save_and_enqueue, determine_belief_timing, ) from flexmeasures.api.common.utils.validators import ( @@ -31,11 +32,8 @@ period_required, values_required, ) -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.data.services.resources import get_sensors from flexmeasures.utils.entity_address_utils import ( @@ -71,7 +69,7 @@ def post_price_data_response( # noqa C901 current_app.logger.info("POSTING PRICE DATA") data_source = get_or_create_source(current_user) - prices = [] + price_df_per_market = [] forecasting_jobs = [] for market_group, event_values in zip(generic_asset_name_groups, value_groups): for market in market_group: @@ -96,21 +94,19 @@ def post_price_data_response( # noqa C901 ) # Create new Price objects - prices.extend( - [ - Price( - use_legacy_kwargs=False, - event_start=event_start, - event_value=event_value, - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, 
belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, belief_horizons + ) + ] + price_df_per_market.append(tb.BeliefsDataFrame(beliefs)) # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts, # by the way, due to things like day-ahead markets. @@ -122,10 +118,10 @@ def post_price_data_response( # noqa C901 start + duration, resolution=duration / len(event_values), horizons=[timedelta(hours=24), timedelta(hours=48)], - enqueue=False, # will enqueue later, only if we successfully saved prices + enqueue=False, # will enqueue later, after saving data ) - return save_to_db(prices, forecasting_jobs) + return save_and_enqueue(price_df_per_market, forecasting_jobs) @type_accepted("PostWeatherDataRequest") @@ -154,7 +150,7 @@ def post_weather_data_response( # noqa: C901 current_app.logger.info("POSTING WEATHER DATA") data_source = get_or_create_source(current_user) - weather_measurements = [] + weather_df_per_sensor = [] forecasting_jobs = [] for sensor_group, event_values in zip(generic_asset_name_groups, value_groups): for sensor in sensor_group: @@ -183,21 +179,19 @@ def post_weather_data_response( # noqa: C901 ) # Create new Weather objects - weather_measurements.extend( - [ - Weather( - use_legacy_kwargs=False, - event_start=event_start, - event_value=event_value, - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, 
belief_horizons + ) + ] + weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play) if current_app.config.get( @@ -212,11 +206,11 @@ def post_weather_data_response( # noqa: C901 start + duration, resolution=duration / len(event_values), horizons=[horizon], - enqueue=False, # will enqueue later, only if we successfully saved weather measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(weather_measurements, forecasting_jobs) + return save_and_enqueue(weather_df_per_sensor, forecasting_jobs) @type_accepted("PostMeterDataRequest") @@ -307,7 +301,7 @@ def post_power_data( if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") user_sensor_ids = [sensor.id for sensor in user_sensors] - power_measurements = [] + power_df_per_connection = [] forecasting_jobs = [] for connection_group, event_values in zip(generic_asset_name_groups, value_groups): for connection in connection_group: @@ -351,22 +345,20 @@ def post_power_data( ) # Create new Power objects - power_measurements.extend( - [ - Power( - use_legacy_kwargs=False, - event_start=event_start, - event_value=event_value - * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value + * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, belief_horizons + ) + ] + power_df_per_connection.append(tb.BeliefsDataFrame(beliefs)) if create_forecasting_jobs_too: 
forecasting_jobs.extend( @@ -375,8 +367,8 @@ def post_power_data( start, start + duration, resolution=duration / len(event_values), - enqueue=False, # will enqueue later, only if we successfully saved power measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(power_measurements, forecasting_jobs) + return save_and_enqueue(power_df_per_connection, forecasting_jobs) diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 6e9e9e176..ad05358a6 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -8,7 +8,6 @@ from timetomodel.forecasting import make_rolling_forecasts import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException @@ -17,7 +16,7 @@ get_query_window, check_data_availability, ) -from flexmeasures.data.utils import get_data_source +from flexmeasures.data.utils import get_data_source, save_to_db from flexmeasures.utils.time_utils import ( as_server_time, server_now, diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index d80150a49..1ea875994 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -10,12 +10,11 @@ from rq.job import Job import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.utils import get_data_source +from flexmeasures.data.utils import get_data_source, 
save_to_db """ The life cycle of a scheduling job: diff --git a/flexmeasures/data/tests/test_time_series_services.py b/flexmeasures/data/tests/test_time_series_services.py index 5125e7c48..9d9589aec 100644 --- a/flexmeasures/data/tests/test_time_series_services.py +++ b/flexmeasures/data/tests/test_time_series_services.py @@ -1,7 +1,7 @@ import pandas as pd from timely_beliefs import utils as tb_utils -from flexmeasures.api.common.utils.api_utils import save_to_db +from flexmeasures.data.utils import save_to_db from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index fe78d8a81..9d74404db 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -1,9 +1,14 @@ -from typing import List, Optional +from typing import List, Optional, Union import click +from flask import current_app +from sqlalchemy.exc import IntegrityError +from timely_beliefs import BeliefsDataFrame -from flexmeasures.data.config import db +from flexmeasures.data import db from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief +from flexmeasures.data.services.time_series import drop_unchanged_beliefs def save_to_session(objects: List[db.Model], overwrite: bool = False): @@ -44,3 +49,112 @@ def get_data_source( f'Session updated with new {data_source_type} data source "{data_source.__repr__()}".' ) return data_source + + +def save_to_db( + data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], + save_changed_beliefs_only: bool = True, +) -> Union[str, List[str]]: + """Save the timed beliefs to the database. + + We make the distinction between updating beliefs and replacing beliefs. + + # Updating beliefs + + An updated belief is a belief from the same source as some already saved belief, and about the same event, + but with a later belief time. 
If it has a different event value, then it represents a changed belief. + Note that it is possible to explicitly record unchanged beliefs (i.e. updated beliefs with a later belief time, + but with the same event value), by setting save_changed_beliefs_only to False. + + # Replacing beliefs + + A replaced belief is a belief from the same source as some already saved belief, + and about the same event and with the same belief time, but with a different event value. + Replacing beliefs is not allowed, except on servers in play mode. + + :param data: BeliefsDataFrame (or a list thereof) to be saved + :param save_changed_beliefs_only: if True, beliefs that are already stored in the database with an earlier belief time are dropped. + :returns: status string (or a list thereof), one of the following: + - 'success': all beliefs were saved + - 'success_with_replacements': all beliefs were saves, (some) replacing pre-existing beliefs + - 'success_but_data_empty': there was nothing to save + - 'success_but_nothing_new': no beliefs represented a state change + - 'success_but_partially_new': not all beliefs represented a state change + - 'failed_due_to_forbidden_replacements': no beliefs were saved, because replacing pre-existing beliefs is forbidden + """ + + # Convert to list + if not isinstance(data, list): + timed_values_list = [data] + else: + timed_values_list = data + + success_list = [] + for timed_values in timed_values_list: + + if timed_values.empty: + # Nothing to save + success_list.append("success_but_data_empty") + continue + + len_before = len(timed_values) + if save_changed_beliefs_only: + + # Drop beliefs that haven't changed + timed_values = ( + timed_values.convert_index_from_belief_horizon_to_time() + .groupby(level=["belief_time", "source"], as_index=False) + .apply(drop_unchanged_beliefs) + ) + len_after = len(timed_values) + + # Work around bug in which groupby still introduces an index level, even though we asked it not to + if None in 
timed_values.index.names: + timed_values.index = timed_values.index.droplevel(None) + + if timed_values.empty: + # No state changes among the beliefs + success_list.append("success_but_nothing_new") + continue + else: + len_after = len_before + + # if timed_values.empty or (save_changed_beliefs_only and len_after < len_before): + # current_app.logger.info("Nothing new to save") + # success_list.append(False) # no data or data already existed or data doesn't represent updated beliefs + # else: + current_app.logger.info("SAVING TO DB...") + try: + TimedBelief.add_to_session( + session=db.session, beliefs_data_frame=timed_values + ) + db.session.flush() + db.session.commit() + if len_after < len_before: + # new data was saved + success_list.append("success_but_partially_new") + else: + # all data was saved + success_list.append("success") + except IntegrityError as e: + current_app.logger.warning(e) + db.session.rollback() + + # Allow data to be replaced only in play mode + if current_app.config.get("FLEXMEASURES_MODE", "") == "play": + TimedBelief.add_to_session( + session=db.session, + beliefs_data_frame=timed_values, + allow_overwrite=True, + ) + db.session.commit() + # some beliefs have been replaced, which was allowed + success_list.append("success_with_replacements") + else: + # some beliefs represented replacements, which was forbidden + success_list.append("failed_due_to_forbidden_replacements") + + # Return a success indicator for each BeliefsDataFrame + if not isinstance(data, list): + return success_list[0] + return success_list From 24a785e4ffa4bd8b8b62481901a91d00f1de2d38 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Tue, 28 Dec 2021 14:14:35 +0100 Subject: [PATCH 34/63] CLI uses TimedBeliefs only Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/testing.py | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index de5621788..f723f6137 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -12,7 +12,6 @@ else: from rq import Worker -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.queries.sensors import ( @@ -41,11 +40,11 @@ def test_making_forecasts(): sensor_id = 1 forecast_filter = ( - Power.query.filter(Power.sensor_id == sensor_id) - .filter(Power.horizon == timedelta(hours=6)) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor_id) + .filter(TimedBelief.belief_horizon == timedelta(hours=6)) .filter( - (Power.datetime >= as_server_time(datetime(2015, 4, 1, 6))) - & (Power.datetime < as_server_time(datetime(2015, 4, 3, 6))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 4, 1, 6))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 4, 3, 6))) ) ) @@ -86,12 +85,6 @@ def test_making_forecasts(): required=True, help="Name of generic asset type.", ) -@click.option( - "--timed-value-type", - "timed_value_type", - required=True, - help="Power, Price or Weather.", -) @click.option("--sensor", "sensor_name", help="Name of sensor.") @click.option( "--from_date", @@ -107,7 +100,6 @@ def test_making_forecasts(): ) def test_generic_model( generic_asset_type_names: List[str], - timed_value_type_name: str, sensor_name: Optional[str] = None, from_date: str = "2015-03-10", period: int = 3, @@ -133,16 +125,6 @@ def test_generic_model( click.echo("No unique sensor found in db, so I will not add any forecasts.") return - # todo: replacing this with timed_value_type = TimedBelief requires streamlining of the collect function on old sensor data classes with the search function on the 
TimedBelief class - if timed_value_type_name.lower() == "Power": - from flexmeasures.data.models.assets import Power as TimedValueType - elif timed_value_type_name.lower() == "Price": - from flexmeasures.data.models.markets import Price as TimedValueType - elif timed_value_type_name.lower() == "Weather": - from flexmeasures.data.models.weather import Weather as TimedValueType - else: - raise ValueError(f"Unknown timed value type {timed_value_type_name}") - linear_model_configurator = lookup_model_specs_configurator("linear") ( model_specs, From d5f181d2ddb4ec0f138e35a90f215cb3a2a58c3c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Tue, 28 Dec 2021 14:18:18 +0100 Subject: [PATCH 35/63] Data scripts use TimedBeliefs only Signed-off-by: F.N. Claessen --- flexmeasures/data/scripts/grid_weather.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index ace75c637..9c8e7282d 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -15,9 +15,8 @@ from flexmeasures.data.services.resources import find_closest_sensor from flexmeasures.data.config import db from flexmeasures.data.transactional import task_with_status_report -from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief FILE_PATH_LOCATION = "/../raw_data/weather-forecasts" DATA_SOURCE_NAME = "OpenWeatherMap" @@ -416,8 +415,7 @@ def save_forecasts_in_db( ) db_forecasts.append( - Weather( - use_legacy_kwargs=False, + TimedBelief( event_start=fc_datetime, belief_horizon=fc_horizon, event_value=fc_value, From 314f70088618a836a0557e0738dee1ea79cd8f31 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Tue, 28 Dec 2021 14:22:49 +0100 Subject: [PATCH 36/63] One more conftest switched to creating TimedBeliefs instead of Weather objects Signed-off-by: F.N. Claessen --- flexmeasures/data/tests/conftest.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index a0fa76fc4..aff5572bf 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -12,7 +12,7 @@ from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import TimedBelief -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather +from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor from flexmeasures.data.models.forecasting import model_map from flexmeasures.data.models.forecasting.model_spec_factory import ( create_initial_model_specs, @@ -133,8 +133,7 @@ def add_test_weather_sensor_and_forecasts(db: SQLAlchemy): values = [value * 600 for value in values] for dt, val in zip(time_slots, values): db.session.add( - Weather( - use_legacy_kwargs=False, + TimedBelief( sensor=sensor.corresponding_sensor, event_start=as_server_time(dt), event_value=val, From 9ffd44904dd8ac0851e5c15db298713ce8a6373a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 09:56:08 +0100 Subject: [PATCH 37/63] Expand docstring note on forbidden replacements Signed-off-by: F.N. 
Claessen --- flexmeasures/data/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 9d74404db..b038a85d2 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -70,7 +70,9 @@ def save_to_db( A replaced belief is a belief from the same source as some already saved belief, and about the same event and with the same belief time, but with a different event value. - Replacing beliefs is not allowed, except on servers in play mode. + Replacing beliefs is not allowed, because messing with the history corrupts data lineage. + Corrections should instead be recorded as updated beliefs. + Servers in 'play' mode are excempted from this rule, to facilitate replaying simulations. :param data: BeliefsDataFrame (or a list thereof) to be saved :param save_changed_beliefs_only: if True, beliefs that are already stored in the database with an earlier belief time are dropped. From 9b1fb22e0cc3ff21b39eb56511c3a18f68c227c4 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 09:59:57 +0100 Subject: [PATCH 38/63] Clarify docstring note on saving changed beliefs only Signed-off-by: F.N. Claessen --- flexmeasures/data/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index b038a85d2..d132c0f09 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -75,7 +75,7 @@ def save_to_db( Servers in 'play' mode are excempted from this rule, to facilitate replaying simulations. :param data: BeliefsDataFrame (or a list thereof) to be saved - :param save_changed_beliefs_only: if True, beliefs that are already stored in the database with an earlier belief time are dropped. 
+ :param save_changed_beliefs_only: if True, updated beliefs are only stored if they represent changed beliefs :returns: status string (or a list thereof), one of the following: - 'success': all beliefs were saved - 'success_with_replacements': all beliefs were saves, (some) replacing pre-existing beliefs From 229e2307260791c019c8075bf3c069b2c3278ef2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 10:04:13 +0100 Subject: [PATCH 39/63] Remove redundant flush Signed-off-by: F.N. Claessen --- flexmeasures/data/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index d132c0f09..a57c0c566 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -130,7 +130,6 @@ def save_to_db( TimedBelief.add_to_session( session=db.session, beliefs_data_frame=timed_values ) - db.session.flush() db.session.commit() if len_after < len_before: # new data was saved From fa563f3bb9763dd12b397e974f76cdca89a739df Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 10:27:29 +0100 Subject: [PATCH 40/63] Catch forbidden belief replacements with more specific exception Signed-off-by: F.N. 
Claessen --- flexmeasures/data/utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index a57c0c566..0297cbb92 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -2,6 +2,7 @@ import click from flask import current_app +from psycopg2.errors import UniqueViolation from sqlalchemy.exc import IntegrityError from timely_beliefs import BeliefsDataFrame @@ -151,9 +152,12 @@ def save_to_db( db.session.commit() # some beliefs have been replaced, which was allowed success_list.append("success_with_replacements") - else: + elif isinstance(e.orig, UniqueViolation): # some beliefs represented replacements, which was forbidden success_list.append("failed_due_to_forbidden_replacements") + else: + # reraise + raise e.orig # Return a success indicator for each BeliefsDataFrame if not isinstance(data, list): From 95ccb1512e350b8362b28a7754c7be6d5b2d4fde Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 10:28:14 +0100 Subject: [PATCH 41/63] Rename variable Signed-off-by: F.N. 
Claessen --- flexmeasures/data/utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 0297cbb92..eb79f3081 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -92,12 +92,12 @@ def save_to_db( else: timed_values_list = data - success_list = [] + status_list = [] for timed_values in timed_values_list: if timed_values.empty: # Nothing to save - success_list.append("success_but_data_empty") + status_list.append("success_but_data_empty") continue len_before = len(timed_values) @@ -117,7 +117,7 @@ def save_to_db( if timed_values.empty: # No state changes among the beliefs - success_list.append("success_but_nothing_new") + status_list.append("success_but_nothing_new") continue else: len_after = len_before @@ -134,10 +134,10 @@ def save_to_db( db.session.commit() if len_after < len_before: # new data was saved - success_list.append("success_but_partially_new") + status_list.append("success_but_partially_new") else: # all data was saved - success_list.append("success") + status_list.append("success") except IntegrityError as e: current_app.logger.warning(e) db.session.rollback() @@ -151,15 +151,15 @@ def save_to_db( ) db.session.commit() # some beliefs have been replaced, which was allowed - success_list.append("success_with_replacements") + status_list.append("success_with_replacements") elif isinstance(e.orig, UniqueViolation): # some beliefs represented replacements, which was forbidden - success_list.append("failed_due_to_forbidden_replacements") + status_list.append("failed_due_to_forbidden_replacements") else: # reraise raise e.orig # Return a success indicator for each BeliefsDataFrame if not isinstance(data, list): - return success_list[0] - return success_list + return status_list[0] + return status_list From 57875e45cee8c17634c9f828219892bbbc351ad6 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Thu, 30 Dec 2021 11:05:20 +0100 Subject: [PATCH 42/63] One transaction per request Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/api_utils.py | 8 +- flexmeasures/data/utils.py | 88 ++++++++++------------ 2 files changed, 42 insertions(+), 54 deletions(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 594026e1a..5d316d44b 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -356,15 +356,13 @@ def save_and_enqueue( forecasting_jobs: List[Job] = None, save_changed_beliefs_only: bool = True, ) -> ResponseTuple: - statuses = modern_save_to_db( + status = modern_save_to_db( data, save_changed_beliefs_only=save_changed_beliefs_only ) enqueue_forecasting_jobs(forecasting_jobs) - if not isinstance(statuses, list): - statuses = [statuses] - if all([status == "success" for status in statuses]): + if status == "success": return request_processed() - elif all([status[:7] == "success" for status in statuses]): + elif status[:7] == "success": return already_received_and_successfully_processed() return invalid_replacement() diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index eb79f3081..da8a2baa3 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -55,7 +55,8 @@ def get_data_source( def save_to_db( data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], save_changed_beliefs_only: bool = True, -) -> Union[str, List[str]]: + allow_overwrite: bool = False, +) -> str: """Save the timed beliefs to the database. We make the distinction between updating beliefs and replacing beliefs. @@ -76,13 +77,14 @@ def save_to_db( Servers in 'play' mode are excempted from this rule, to facilitate replaying simulations. 
:param data: BeliefsDataFrame (or a list thereof) to be saved - :param save_changed_beliefs_only: if True, updated beliefs are only stored if they represent changed beliefs - :returns: status string (or a list thereof), one of the following: + :param save_changed_beliefs_only: if True, unchanged beliefs are skipped (updated beliefs are only stored if they represent changed beliefs) + if False, all updated beliefs are stored + :param allow_overwrite: if True, already stored beliefs may be replaced + if False, already stored beliefs may not be replaced + :returns: status string, one of the following: - 'success': all beliefs were saved - - 'success_with_replacements': all beliefs were saves, (some) replacing pre-existing beliefs - - 'success_but_data_empty': there was nothing to save - - 'success_but_nothing_new': no beliefs represented a state change - - 'success_but_partially_new': not all beliefs represented a state change + - 'success_with_replacements': all beliefs were saved, (possibly) replacing pre-existing beliefs + - 'success_with_unchanged_beliefs_skipped': not all beliefs represented a state change - 'failed_due_to_forbidden_replacements': no beliefs were saved, because replacing pre-existing beliefs is forbidden """ @@ -92,12 +94,11 @@ def save_to_db( else: timed_values_list = data - status_list = [] + status = "success" if not allow_overwrite else "success_with_replacements" for timed_values in timed_values_list: if timed_values.empty: # Nothing to save - status_list.append("success_but_data_empty") continue len_before = len(timed_values) @@ -110,6 +111,8 @@ def save_to_db( .apply(drop_unchanged_beliefs) ) len_after = len(timed_values) + if len_after < len_before: + status = "success_with_unchanged_beliefs_skipped" # Work around bug in which groupby still introduces an index level, even though we asked it not to if None in timed_values.index.names: @@ -117,49 +120,36 @@ def save_to_db( if timed_values.empty: # No state changes among the beliefs - 
status_list.append("success_but_nothing_new") continue else: len_after = len_before - # if timed_values.empty or (save_changed_beliefs_only and len_after < len_before): - # current_app.logger.info("Nothing new to save") - # success_list.append(False) # no data or data already existed or data doesn't represent updated beliefs - # else: current_app.logger.info("SAVING TO DB...") - try: - TimedBelief.add_to_session( - session=db.session, beliefs_data_frame=timed_values + TimedBelief.add_to_session( + session=db.session, + beliefs_data_frame=timed_values, + allow_overwrite=allow_overwrite, + ) + try: + db.session.commit() + except IntegrityError as e: + current_app.logger.warning(e) + db.session.rollback() + + # Catch only unique violations + if not isinstance(e.orig, UniqueViolation): + # reraise + raise e.orig + + # Allow data to be replaced only in play mode + if current_app.config.get("FLEXMEASURES_MODE", "") == "play": + status = save_to_db( + data=data, + save_changed_beliefs_only=save_changed_beliefs_only, + allow_overwrite=True, ) - db.session.commit() - if len_after < len_before: - # new data was saved - status_list.append("success_but_partially_new") - else: - # all data was saved - status_list.append("success") - except IntegrityError as e: - current_app.logger.warning(e) - db.session.rollback() - - # Allow data to be replaced only in play mode - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - TimedBelief.add_to_session( - session=db.session, - beliefs_data_frame=timed_values, - allow_overwrite=True, - ) - db.session.commit() - # some beliefs have been replaced, which was allowed - status_list.append("success_with_replacements") - elif isinstance(e.orig, UniqueViolation): - # some beliefs represented replacements, which was forbidden - status_list.append("failed_due_to_forbidden_replacements") - else: - # reraise - raise e.orig - - # Return a success indicator for each BeliefsDataFrame - if not isinstance(data, list): - return status_list[0] 
- return status_list + else: + # some beliefs represented replacements, which was forbidden + status = "failed_due_to_forbidden_replacements" + + return status From c514cb58bc068b4d521cb90c66a3eb05e4050691 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 11:09:42 +0100 Subject: [PATCH 43/63] Only enqueue forecasting jobs upon successfully saving new data Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/api_utils.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 5d316d44b..072c48606 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -356,10 +356,17 @@ def save_and_enqueue( forecasting_jobs: List[Job] = None, save_changed_beliefs_only: bool = True, ) -> ResponseTuple: + + # Attempt to save status = modern_save_to_db( data, save_changed_beliefs_only=save_changed_beliefs_only ) - enqueue_forecasting_jobs(forecasting_jobs) + + # Only enqueue forecasting jobs upon successfully saving new data + if status[:7] == "success": + enqueue_forecasting_jobs(forecasting_jobs) + + # Pick a response if status == "success": return request_processed() elif status[:7] == "success": From dda69a9a29780abee6ce3d79d96c7cd18481a6ca Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 11:36:49 +0100 Subject: [PATCH 44/63] Flush instead of commit Signed-off-by: F.N. 
Claessen --- flexmeasures/data/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index da8a2baa3..269cd7a58 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -131,7 +131,8 @@ def save_to_db( allow_overwrite=allow_overwrite, ) try: - db.session.commit() + # Flush to check for unique violations (due to attempting to replace beliefs) + db.session.flush() except IntegrityError as e: current_app.logger.warning(e) db.session.rollback() From 2d5f7212b48c5c0568286f3af0192130319678ba Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 11:41:59 +0100 Subject: [PATCH 45/63] Expand test for forbidden data replacement Signed-off-by: F.N. Claessen --- flexmeasures/api/dev/tests/test_sensor_data.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/flexmeasures/api/dev/tests/test_sensor_data.py b/flexmeasures/api/dev/tests/test_sensor_data.py index f05c11068..027d760e2 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data.py +++ b/flexmeasures/api/dev/tests/test_sensor_data.py @@ -65,12 +65,16 @@ def test_post_invalid_sensor_data( def test_post_sensor_data_twice(client, setup_api_test_data): auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") post_data = make_sensor_data_request() + + # Check that 1st time posting the data succeeds response = client.post( url_for("post_sensor_data"), json=post_data, headers={"Authorization": auth_token}, ) assert response.status_code == 200 + + # Check that 2nd time posting the same data succeeds informatively response = client.post( url_for("post_sensor_data"), json=post_data, @@ -79,3 +83,14 @@ def test_post_sensor_data_twice(client, setup_api_test_data): print(response.json) assert response.status_code == 200 assert "data has already been received" in response.json["message"] + + # Check that replacing data fails informatively + post_data["values"][0] = 100 + response = 
client.post( + url_for("post_sensor_data"), + json=post_data, + headers={"Authorization": auth_token}, + ) + print(response.json) + assert response.status_code == 403 + assert "data represents a replacement" in response.json["message"] From cd6b8c6d0afa993e2318f71fe33036b5112e2f90 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 12:09:41 +0100 Subject: [PATCH 46/63] Simplify play mode exemption for replacing beliefs Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/api_utils.py | 2 +- flexmeasures/data/utils.py | 22 ++++++----------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 072c48606..b6aafe99d 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -369,7 +369,7 @@ def save_and_enqueue( # Pick a response if status == "success": return request_processed() - elif status[:7] == "success": + elif status == "success_with_unchanged_beliefs_skipped": return already_received_and_successfully_processed() return invalid_replacement() diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 269cd7a58..9cc4ee107 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -55,7 +55,6 @@ def save_to_db( data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], save_changed_beliefs_only: bool = True, - allow_overwrite: bool = False, ) -> str: """Save the timed beliefs to the database.
@@ -79,11 +78,8 @@ def save_to_db( :param data: BeliefsDataFrame (or a list thereof) to be saved :param save_changed_beliefs_only: if True, unchanged beliefs are skipped (updated beliefs are only stored if they represent changed beliefs) if False, all updated beliefs are stored - :param allow_overwrite: if True, already stored beliefs may be replaced - if False, already stored beliefs may not be replaced :returns: status string, one of the following: - 'success': all beliefs were saved - - 'success_with_replacements': all beliefs were saved, (possibly) replacing pre-existing beliefs - 'success_with_unchanged_beliefs_skipped': not all beliefs represented a state change - 'failed_due_to_forbidden_replacements': no beliefs were saved, because replacing pre-existing beliefs is forbidden """ @@ -94,7 +90,7 @@ def save_to_db( else: timed_values_list = data - status = "success" if not allow_overwrite else "success_with_replacements" + status = "success" for timed_values in timed_values_list: if timed_values.empty: @@ -128,7 +124,9 @@ def save_to_db( TimedBelief.add_to_session( session=db.session, beliefs_data_frame=timed_values, - allow_overwrite=allow_overwrite, + allow_overwrite=False + if current_app.config.get("FLEXMEASURES_MODE", "") != "play" + else True, ) try: # Flush to check for unique violations (due to attempting to replace beliefs) @@ -142,15 +140,7 @@ def save_to_db( # reraise raise e.orig - # Allow data to be replaced only in play mode - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - status = save_to_db( - data=data, - save_changed_beliefs_only=save_changed_beliefs_only, - allow_overwrite=True, - ) - else: - # some beliefs represented replacements, which was forbidden - status = "failed_due_to_forbidden_replacements" + # Some beliefs represented replacements, which was forbidden + status = "failed_due_to_forbidden_replacements" return status From d7c05124f28d07d6febd4759b83aad08f2c5677b Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Thu, 30 Dec 2021 13:21:06 +0100 Subject: [PATCH 47/63] Add note about potential session rollback Signed-off-by: F.N. Claessen --- flexmeasures/data/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 9cc4ee107..bfcadeb68 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -58,6 +58,8 @@ def save_to_db( ) -> str: """Save the timed beliefs to the database. + NB Roles back session in case of IntegrityError upon flushing the session. Best to keep transactions short. + We make the distinction between updating beliefs and replacing beliefs. # Updating beliefs From bb7171a7f2b0ffde03b24a089cf1d2c7f8c2b8a7 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 13:25:18 +0100 Subject: [PATCH 48/63] Typo Signed-off-by: F.N. Claessen --- flexmeasures/data/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index bfcadeb68..952d624bc 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -75,7 +75,7 @@ def save_to_db( and about the same event and with the same belief time, but with a different event value. Replacing beliefs is not allowed, because messing with the history corrupts data lineage. Corrections should instead be recorded as updated beliefs. - Servers in 'play' mode are excempted from this rule, to facilitate replaying simulations. + Servers in 'play' mode are exempt from this rule, to facilitate replaying simulations. :param data: BeliefsDataFrame (or a list thereof) to be saved :param save_changed_beliefs_only: if True, unchanged beliefs are skipped (updated beliefs are only stored if they represent changed beliefs) From fec17b734f975340194e7925f1cb76508671c371 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 17:50:58 +0100 Subject: [PATCH 49/63] Move UniqueViolation catching logic to error handler Signed-off-by: F.N. 
Claessen --- flexmeasures/api/__init__.py | 22 +++++++++++++++++++++- flexmeasures/data/utils.py | 20 +++----------------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/flexmeasures/api/__init__.py b/flexmeasures/api/__init__.py index e5daa8017..b2a014e6f 100644 --- a/flexmeasures/api/__init__.py +++ b/flexmeasures/api/__init__.py @@ -2,14 +2,18 @@ from flask_security.utils import verify_password from flask_json import as_json from flask_login import current_user +from psycopg2.errors import UniqueViolation +from sqlalchemy.exc import IntegrityError +from werkzeug.exceptions import HTTPException from flexmeasures import __version__ as flexmeasures_version from flexmeasures.data.models.user import User from flexmeasures.api.common.utils.args_parsing import ( validation_error_handler, ) -from flexmeasures.api.common.responses import invalid_sender +from flexmeasures.api.common.responses import invalid_replacement, invalid_sender from flexmeasures.data.schemas.utils import FMValidationError +from flexmeasures.utils.error_utils import error_handling_router # The api blueprint. It is registered with the Flask app (see app.py) flexmeasures_api = Blueprint("flexmeasures_api", __name__) @@ -84,6 +88,7 @@ def register_at(app: Flask): # handle API specific errors app.register_error_handler(FMValidationError, validation_error_handler) + app.register_error_handler(IntegrityError, catch_timed_belief_replacements) app.unauthorized_handler_api = invalid_sender app.register_blueprint( @@ -115,3 +120,18 @@ def register_at(app: Flask): v1_3_register_at(app) v2_0_register_at(app) dev_register_at(app) + + +def catch_timed_belief_replacements(error: IntegrityError): + """Catch IntegrityErrors due to a UniqueViolation on the TimedBelief primary key. + + Return a more informative message. 
+ """ + if isinstance(error.orig, UniqueViolation) and "timed_belief_pkey" in str( + error.orig + ): + # Some beliefs represented replacements, which was forbidden + return invalid_replacement() + + # Forward to our generic error handler + return error_handling_router(error) diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 952d624bc..4543eb681 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -58,7 +58,7 @@ def save_to_db( ) -> str: """Save the timed beliefs to the database. - NB Roles back session in case of IntegrityError upon flushing the session. Best to keep transactions short. + NB Flushing the session. Best to keep transactions short. We make the distinction between updating beliefs and replacing beliefs. @@ -83,7 +83,6 @@ def save_to_db( :returns: status string, one of the following: - 'success': all beliefs were saved - 'success_with_unchanged_beliefs_skipped': not all beliefs represented a state change - - 'failed_due_to_forbidden_replacements': no beliefs were saved, because replacing pre-existing beliefs is forbidden """ # Convert to list @@ -130,19 +129,6 @@ def save_to_db( if current_app.config.get("FLEXMEASURES_MODE", "") != "play" else True, ) - try: - # Flush to check for unique violations (due to attempting to replace beliefs) - db.session.flush() - except IntegrityError as e: - current_app.logger.warning(e) - db.session.rollback() - - # Catch only unique violations - if not isinstance(e.orig, UniqueViolation): - # reraise - raise e.orig - - # Some beliefs represented replacements, which was forbidden - status = "failed_due_to_forbidden_replacements" - + # Flush to bring up potential unique violations (due to attempting to replace beliefs) + db.session.flush() return status From 31ec00ae7bfe33d583fb495dad7cf15bbb6ea917 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 30 Dec 2021 19:31:22 +0100 Subject: [PATCH 50/63] flake8 Signed-off-by: F.N. 
Claessen --- flexmeasures/api/__init__.py | 1 - flexmeasures/data/utils.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/flexmeasures/api/__init__.py b/flexmeasures/api/__init__.py index b2a014e6f..62eedb937 100644 --- a/flexmeasures/api/__init__.py +++ b/flexmeasures/api/__init__.py @@ -4,7 +4,6 @@ from flask_login import current_user from psycopg2.errors import UniqueViolation from sqlalchemy.exc import IntegrityError -from werkzeug.exceptions import HTTPException from flexmeasures import __version__ as flexmeasures_version from flexmeasures.data.models.user import User diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 4543eb681..7d2dd3649 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -2,8 +2,6 @@ import click from flask import current_app -from psycopg2.errors import UniqueViolation -from sqlalchemy.exc import IntegrityError from timely_beliefs import BeliefsDataFrame from flexmeasures.data import db From a2fc26b81bef8d9fc30a7ab668109cb3a7ac96d1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 13:42:15 +0100 Subject: [PATCH 51/63] Rewrite solver to deal with asymmetry in up and down commitment prices Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/planning/solver.py | 32 ++++++++++++++++----- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index 78341b5e8..dc73c465e 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -10,6 +10,8 @@ RangeSet, Param, Reals, + NonNegativeReals, + NonPositiveReals, Constraint, Objective, minimize, @@ -182,6 +184,12 @@ def ems_derivative_min_select(m, j): # Add variables model.power = Var(model.d, model.j, domain=Reals, initialize=0) + model.ems_power_deviation_down = Var( + model.c, model.j, domain=NonPositiveReals, initialize=0 + ) + model.ems_power_deviation_up = Var( + model.c, model.j, domain=NonNegativeReals, initialize=0 + ) # Add constraints as a tuple of (lower bound, value, upper bound) def device_bounds(m, d, j): @@ -201,23 +209,33 @@ def device_derivative_bounds(m, d, j): def ems_derivative_bounds(m, j): return m.ems_derivative_min[j], sum(m.power[:, j]), m.ems_derivative_max[j] + def power_commitment_equality(m, j): + """Total power (sum over devices) should equal sum of commitments and deviations from commitments.""" + return ( + 0, + sum(m.commitment_quantity[:, j]) + + sum(m.ems_power_deviation_down[:, j]) + + sum(m.ems_power_deviation_up[:, j]) + - sum(m.power[:, j]), + 0, + ) + model.device_energy_bounds = Constraint(model.d, model.j, rule=device_bounds) model.device_power_bounds = Constraint( model.d, model.j, rule=device_derivative_bounds ) model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds) + model.power_commitment_equality = Constraint( + model.j, rule=power_commitment_equality + ) # Add objective def cost_function(m): costs = 0 for c in m.c: for j in m.j: - ems_power_in_j = sum(m.power[d, j] for d in m.d) - ems_power_deviation = ems_power_in_j - m.commitment_quantity[c, j] - if value(ems_power_deviation) >= 0: - costs += ems_power_deviation 
* m.up_price[c, j] - else: - costs += ems_power_deviation * m.down_price[c, j] + costs += m.ems_power_deviation_down[c, j] * m.down_price[c, j] + costs += m.ems_power_deviation_up[c, j] * m.up_price[c, j] return costs model.costs = Objective(rule=cost_function, sense=minimize) @@ -243,5 +261,5 @@ def cost_function(m): # model.pprint() # print(results.solver.termination_condition) # print(planned_costs) - # input() + # model.display() return planned_power_per_device, planned_costs, results From 9f3ab24f21c08b5ec517d06a3cfb223b2d03b7cb Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 13:45:01 +0100 Subject: [PATCH 52/63] Add optional roundtrip_efficiency field to UDI events, and use it to scale prices Signed-off-by: F.N. Claessen --- flexmeasures/api/v1_3/implementations.py | 4 ++++ flexmeasures/data/models/planning/battery.py | 16 ++++++++++++++++ .../data/models/planning/charging_station.py | 18 +++++++++++++++++- flexmeasures/data/services/scheduling.py | 19 +++++++++++++++++-- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 53fe333cf..d478ff279 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -280,6 +280,9 @@ def post_udi_event_response(unit): if unit == "kWh": value = value / 1000.0 + # get optional efficiency + roundtrip_efficiency = form.get("roundtrip_efficiency", None) + # set soc targets start_of_schedule = datetime end_of_schedule = datetime + current_app.config.get("FLEXMEASURES_PLANNING_HORIZON") @@ -349,6 +352,7 @@ def post_udi_event_response(unit): belief_time=datetime, soc_at_start=value, soc_targets=soc_targets, + roundtrip_efficiency=roundtrip_efficiency, udi_event_ea=form.get("event"), enqueue=True, ) diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index 98d95ba0c..ca59e6a29 100644 --- 
a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -21,6 +21,7 @@ def schedule_battery( resolution: timedelta, soc_at_start: float, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, prefer_charging_sooner: bool = True, ) -> Union[pd.Series, None]: """Schedule a battery asset based directly on the latest beliefs regarding market prices within the specified time @@ -37,6 +38,11 @@ def schedule_battery( ], ) + # Check for round-trip efficiency + if roundtrip_efficiency is None: + # Get default from sensor, or use 100% otherwise + roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, allow_trimmed_query_window=True @@ -61,6 +67,16 @@ def schedule_battery( prices.loc[start : end - resolution]["event_value"] ] + # Apply round-trip efficiency evenly to charging and discharging prices + commitment_downwards_deviation_price = [ + commitment * roundtrip_efficiency ** 0.5 + for commitment in commitment_downwards_deviation_price + ] + commitment_upwards_deviation_price = [ + commitment / roundtrip_efficiency ** 0.5 + for commitment in commitment_upwards_deviation_price + ] + # Set up device constraints (only one device for this EMS) columns = [ "equals", diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 279fd9b71..1f6a7243f 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Optional, Union from datetime import datetime, timedelta from pandas import Series, Timestamp @@ -21,6 +21,7 @@ def schedule_charging_station( resolution: timedelta, soc_at_start: float, soc_targets: Series, + roundtrip_efficiency: 
Optional[float] = None, prefer_charging_sooner: bool = True, ) -> Union[Series, None]: """Schedule a charging station asset based directly on the latest beliefs regarding market prices within the specified time @@ -32,6 +33,11 @@ def schedule_charging_station( # Check for required Sensor attributes sensor.check_required_attributes([("capacity_in_mw", (float, int))]) + # Check for round-trip efficiency + if roundtrip_efficiency is None: + # Get default from sensor, or use 100% otherwise + roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, allow_trimmed_query_window=True @@ -58,6 +64,16 @@ def schedule_charging_station( prices.loc[start : end - resolution]["event_value"] ] + # Apply round-trip efficiency evenly to charging and discharging + commitment_downwards_deviation_price = [ + commitment * roundtrip_efficiency ** 0.5 + for commitment in commitment_downwards_deviation_price + ] + commitment_upwards_deviation_price = [ + commitment / roundtrip_efficiency ** 0.5 + for commitment in commitment_upwards_deviation_price + ] + # Set up device constraints (only one device for this EMS) columns = [ "equals", diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 1ea875994..13af125bd 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -36,6 +36,7 @@ def create_scheduling_job( resolution: timedelta = DEFAULT_RESOLUTION, soc_at_start: Optional[float] = None, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, udi_event_ea: Optional[str] = None, enqueue: bool = True, ) -> Job: @@ -61,6 +62,7 @@ def create_scheduling_job( resolution=resolution, soc_at_start=soc_at_start, soc_targets=soc_targets, + roundtrip_efficiency=roundtrip_efficiency, ), id=udi_event_ea, 
connection=current_app.queues["scheduling"].connection, @@ -88,6 +90,7 @@ def make_schedule( resolution: timedelta, soc_at_start: Optional[float] = None, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, ) -> bool: """Preferably, a starting soc is given. Otherwise, we try to retrieve the current state of charge from the asset (if that is the valid one at the start). @@ -122,14 +125,26 @@ def make_schedule( if sensor.generic_asset.generic_asset_type.name == "battery": consumption_schedule = schedule_battery( - sensor, start, end, resolution, soc_at_start, soc_targets + sensor, + start, + end, + resolution, + soc_at_start, + soc_targets, + roundtrip_efficiency, ) elif sensor.generic_asset.generic_asset_type.name in ( "one-way_evse", "two-way_evse", ): consumption_schedule = schedule_charging_station( - sensor, start, end, resolution, soc_at_start, soc_targets + sensor, + start, + end, + resolution, + soc_at_start, + soc_targets, + roundtrip_efficiency, ) else: raise ValueError( From 4f9f5c87a36f5dcd1d8e5c1b7cf11b5fe620dcbc Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 13:47:58 +0100 Subject: [PATCH 53/63] Add test cases for various round-trip efficiencies Signed-off-by: F.N. 
Claessen --- .../data/models/planning/tests/test_solver.py | 42 +++++++++++++++---- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 10562f14d..2d80b414b 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -33,7 +33,15 @@ def test_battery_solver_day_1(add_battery_assets): assert soc <= battery.get_attribute("max_soc_in_mwh") -def test_battery_solver_day_2(add_battery_assets): +@pytest.mark.parametrize( + "roundtrip_efficiency", + [ + 1, + 0.99, + 0.01, + ], +) +def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da @@ -41,7 +49,14 @@ def test_battery_solver_day_2(add_battery_assets): end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) soc_at_start = battery.get_attribute("soc_in_mwh") - schedule = schedule_battery(battery, start, end, resolution, soc_at_start) + schedule = schedule_battery( + battery, + start, + end, + resolution, + soc_at_start, + roundtrip_efficiency=roundtrip_efficiency, + ) soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6) with pd.option_context("display.max_rows", None, "display.max_columns", 3): @@ -58,12 +73,23 @@ def test_battery_solver_day_2(add_battery_assets): assert soc_schedule.iloc[-1] == battery.get_attribute( "min_soc_in_mwh" ) # Battery sold out at the end of its planning horizon - assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( - "min_soc_in_mwh" - ) # Sell what you begin with - assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( - "max_soc_in_mwh" - ) # Buy what you can to 
sell later + + # As long as the roundtrip efficiency isn't too bad (I haven't computed the actual switch point) + if roundtrip_efficiency > 0.9: + assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( + "min_soc_in_mwh" + ) # Sell what you begin with + assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( + "max_soc_in_mwh" + ) # Buy what you can to sell later + else: + # If the roundtrip efficiency is poor, best to stand idle + assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( + "soc_in_mwh" + ) + assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( + "soc_in_mwh" + ) @pytest.mark.parametrize( From 32034ebd681a0dd8d0d674a0d0a203ab4b43640a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 13:58:02 +0100 Subject: [PATCH 54/63] Add changelog entries Signed-off-by: F.N. Claessen --- documentation/api/change_log.rst | 8 ++++++++ documentation/changelog.rst | 1 + 2 files changed, 9 insertions(+) diff --git a/documentation/api/change_log.rst b/documentation/api/change_log.rst index 4a76a5a54..d6f119230 100644 --- a/documentation/api/change_log.rst +++ b/documentation/api/change_log.rst @@ -39,6 +39,14 @@ v2.0-0 | 2020-11-14 - REST endpoints for managing assets: `/assets/` (GET, POST) and `/asset/` (GET, PATCH, DELETE). +v1.3-11 | 2022-01-01 +"""""""""""""""""""" + +*Affects all versions since v1.3*. + +- Extended the *postUdiEvent* endpoint with an optional "roundtrip_efficiency" field, for use in scheduling. + + v1.3-10 | 2021-11-08 """""""""""""""""""" diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 2844d3399..01626d64e 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -11,6 +11,7 @@ v0.8.0 | November XX, 2021 New features ----------- * Charts with sensor data can be requested in one of the supported [`vega-lite themes `_] (incl. 
a dark theme) [see `PR #221 `_] +* Schedulers take into account round-trip efficiency if set [see `PR #291 `_] Bugfixes ----------- From ae0c9cf469cbcf5aab0c312f488c25d2de9d366b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 14:03:49 +0100 Subject: [PATCH 55/63] Add documentation for the new API field Signed-off-by: F.N. Claessen --- flexmeasures/api/v1_3/routes.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flexmeasures/api/v1_3/routes.py b/flexmeasures/api/v1_3/routes.py index 0e4c5addd..4154c7693 100644 --- a/flexmeasures/api/v1_3/routes.py +++ b/flexmeasures/api/v1_3/routes.py @@ -104,6 +104,7 @@ def post_udi_event(): This "PostUdiEventRequest" message posts a state of charge (soc) of 12.1 kWh at 10.00am, and a target state of charge of 25 kWh at 4.00pm, as UDI event 204 of device 10 of owner 7. + Roundtrip efficiency for use in scheduling is set to 98%. .. code-block:: json @@ -118,7 +119,8 @@ def post_udi_event(): "value": 25, "datetime": "2015-06-02T16:00:00+00:00" } - ] + ], + "roundtrip_efficiency": 0.98 } **Example response** From 981ff8f7ef6875167d76a2f4c00fab9909da7e43 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 14:41:38 +0100 Subject: [PATCH 56/63] Grammar corrections Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/solver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index dc73c465e..d7fd49285 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -33,7 +33,7 @@ def device_scheduler( # noqa C901 commitment_upwards_deviation_price: Union[List[pd.Series], List[float]], ) -> Tuple[List[pd.Series], float, SolverResults]: """Schedule devices given constraints on a device and EMS level, and given a list of commitments by the EMS. 
- The commitments are assumed to be with regards to the flow of energy to the device (positive for consumption, + The commitments are assumed to be with regard to the flow of energy to the device (positive for consumption, negative for production). The solver minimises the costs of deviating from the commitments. Device constraints are on a device level. Handled constraints (listed by column name): @@ -56,7 +56,7 @@ def device_scheduler( # noqa C901 All Series and DataFrames should have the same resolution. - For now we pass in the various constraints and prices as separate variables, from which we make a MultiIndex + For now, we pass in the various constraints and prices as separate variables, from which we make a MultiIndex DataFrame. Later we could pass in a MultiIndex DataFrame directly. """ From 48d98e7ece13c5634bf43e4dc1c9a4aab9cbc340 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 14:52:46 +0100 Subject: [PATCH 57/63] Fix return value for empty EMS Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/solver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index d7fd49285..b1179b20c 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -62,7 +62,7 @@ def device_scheduler( # noqa C901 # If the EMS has no devices, don't bother if len(device_constraints) == 0: - return [], 0 + return [], 0, SolverResults() # Check if commitments have the same time window and resolution as the constraints start = device_constraints[0].index.to_pydatetime()[0] From 6c86207076c97fd0f6eb9d79043f4a4c07233ff3 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Sat, 1 Jan 2022 16:19:17 +0100 Subject: [PATCH 58/63] Allow efficiencies per device for multi-device EMS, by stopping the application of round-trip efficiency as price scalars and modeling device flows in more detail Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/battery.py | 16 ++-- .../data/models/planning/charging_station.py | 14 +-- flexmeasures/data/models/planning/solver.py | 95 +++++++++++++++---- 3 files changed, 89 insertions(+), 36 deletions(-) diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index ca59e6a29..06c9d406d 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -67,16 +67,6 @@ def schedule_battery( prices.loc[start : end - resolution]["event_value"] ] - # Apply round-trip efficiency evenly to charging and discharging prices - commitment_downwards_deviation_price = [ - commitment * roundtrip_efficiency ** 0.5 - for commitment in commitment_downwards_deviation_price - ] - commitment_upwards_deviation_price = [ - commitment / roundtrip_efficiency ** 0.5 - for commitment in commitment_upwards_deviation_price - ] - # Set up device constraints (only one device for this EMS) columns = [ "equals", @@ -85,6 +75,8 @@ def schedule_battery( "derivative equals", "derivative max", "derivative min", + "derivative down efficiency", + "derivative up efficiency", ] device_constraints = [initialize_df(columns, start, end, resolution)] if soc_targets is not None: @@ -106,6 +98,10 @@ def schedule_battery( ) device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") + # Apply round-trip efficiency evenly to charging and discharging + device_constraints[0]["derivative down efficiency"] = roundtrip_efficiency ** 0.5 + device_constraints[0]["derivative up efficiency"] = roundtrip_efficiency ** 0.5 + # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] 
ems_constraints = initialize_df(columns, start, end, resolution) diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 1f6a7243f..a5d1a56b8 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -64,16 +64,6 @@ def schedule_charging_station( prices.loc[start : end - resolution]["event_value"] ] - # Apply round-trip efficiency evenly to charging and discharging - commitment_downwards_deviation_price = [ - commitment * roundtrip_efficiency ** 0.5 - for commitment in commitment_downwards_deviation_price - ] - commitment_upwards_deviation_price = [ - commitment / roundtrip_efficiency ** 0.5 - for commitment in commitment_upwards_deviation_price - ] - # Set up device constraints (only one device for this EMS) columns = [ "equals", @@ -111,6 +101,10 @@ def schedule_charging_station( else: device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") + # Apply round-trip efficiency evenly to charging and discharging + device_constraints[0]["derivative down efficiency"] = roundtrip_efficiency ** 0.5 + device_constraints[0]["derivative up efficiency"] = roundtrip_efficiency ** 0.5 + # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] ems_constraints = initialize_df(columns, start, end, resolution) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index b1179b20c..057c08bbf 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -43,6 +43,8 @@ def device_scheduler( # noqa C901 derivative max: maximum flow (e.g. 
in MW or boxes/h) derivative min: minimum flow derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max) + derivative down efficiency: ratio of downwards flows (flow into EMS : flow out of device) + derivative up efficiency: ratio of upwards flows (flow into device : flow out of EMS) EMS constraints are on an EMS level. Handled constraints (listed by column name): derivative max: maximum flow derivative min: minimum flow @@ -166,6 +168,18 @@ def ems_derivative_min_select(m, j): else: return v + def device_derivative_down_efficiency(m, d, j): + try: + return device_constraints[d]["derivative down efficiency"].iloc[j] + except KeyError: + return 1 + + def device_derivative_up_efficiency(m, d, j): + try: + return device_constraints[d]["derivative up efficiency"].iloc[j] + except KeyError: + return 1 + model.up_price = Param(model.c, model.j, initialize=price_up_select) model.down_price = Param(model.c, model.j, initialize=price_down_select) model.commitment_quantity = Param( @@ -181,13 +195,23 @@ def ems_derivative_min_select(m, j): ) model.ems_derivative_max = Param(model.j, initialize=ems_derivative_max_select) model.ems_derivative_min = Param(model.j, initialize=ems_derivative_min_select) + model.device_derivative_down_efficiency = Param( + model.d, model.j, initialize=device_derivative_down_efficiency + ) + model.device_derivative_up_efficiency = Param( + model.d, model.j, initialize=device_derivative_up_efficiency + ) # Add variables - model.power = Var(model.d, model.j, domain=Reals, initialize=0) - model.ems_power_deviation_down = Var( + model.ems_power = Var(model.d, model.j, domain=Reals, initialize=0) + model.device_power_down = Var( + model.d, model.j, domain=NonPositiveReals, initialize=0 + ) + model.device_power_up = Var(model.d, model.j, domain=NonNegativeReals, initialize=0) + model.commitment_downwards_deviation = Var( model.c, model.j, domain=NonPositiveReals, initialize=0 ) - model.ems_power_deviation_up = 
Var( + model.commitment_upwards_deviation = Var( model.c, model.j, domain=NonNegativeReals, initialize=0 ) @@ -195,28 +219,55 @@ def ems_derivative_min_select(m, j): def device_bounds(m, d, j): return ( m.device_min[d, j], - sum(m.power[d, k] for k in range(0, j + 1)), + sum( + m.device_power_down[d, k] + m.device_power_up[d, k] + for k in range(0, j + 1) + ), m.device_max[d, j], ) def device_derivative_bounds(m, d, j): return ( m.device_derivative_min[d, j], - m.power[d, j], + m.device_power_down[d, j] + m.device_power_up[d, j], + m.device_derivative_max[d, j], + ) + + def device_down_derivative_bounds(m, d, j): + return ( + m.device_derivative_min[d, j], + m.device_power_down[d, j], + 0, + ) + + def device_up_derivative_bounds(m, d, j): + return ( + 0, + m.device_power_up[d, j], m.device_derivative_max[d, j], ) def ems_derivative_bounds(m, j): - return m.ems_derivative_min[j], sum(m.power[:, j]), m.ems_derivative_max[j] + return m.ems_derivative_min[j], sum(m.ems_power[:, j]), m.ems_derivative_max[j] - def power_commitment_equality(m, j): - """Total power (sum over devices) should equal sum of commitments and deviations from commitments.""" + def ems_flow_commitment_equalities(m, j): + """Couple EMS flows (sum over devices) to commitments.""" return ( 0, sum(m.commitment_quantity[:, j]) - + sum(m.ems_power_deviation_down[:, j]) - + sum(m.ems_power_deviation_up[:, j]) - - sum(m.power[:, j]), + + sum(m.commitment_downwards_deviation[:, j]) + + sum(m.commitment_upwards_deviation[:, j]) + - sum(m.ems_power[:, j]), + 0, + ) + + def device_derivative_equalities(m, d, j): + """Couple device flows to EMS flows per device, applying efficiencies.""" + return ( + 0, + m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] + + m.device_power_down[d, j] * m.device_derivative_up_efficiency[d, j] + - m.ems_power[d, j], 0, ) @@ -224,9 +275,18 @@ def power_commitment_equality(m, j): model.device_power_bounds = Constraint( model.d, model.j, rule=device_derivative_bounds 
) + model.device_power_down_bounds = Constraint( + model.d, model.j, rule=device_down_derivative_bounds + ) + model.device_power_up_bounds = Constraint( + model.d, model.j, rule=device_up_derivative_bounds + ) model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds) - model.power_commitment_equality = Constraint( - model.j, rule=power_commitment_equality + model.ems_power_commitment_equalities = Constraint( + model.j, rule=ems_flow_commitment_equalities + ) + model.device_power_equalities = Constraint( + model.d, model.j, rule=device_derivative_equalities ) # Add objective @@ -234,8 +294,8 @@ def cost_function(m): costs = 0 for c in m.c: for j in m.j: - costs += m.ems_power_deviation_down[c, j] * m.down_price[c, j] - costs += m.ems_power_deviation_up[c, j] * m.up_price[c, j] + costs += m.commitment_downwards_deviation[c, j] * m.down_price[c, j] + costs += m.commitment_upwards_deviation[c, j] * m.up_price[c, j] return costs model.costs = Objective(rule=cost_function, sense=minimize) @@ -248,7 +308,10 @@ def cost_function(m): planned_costs = value(model.costs) planned_power_per_device = [] for d in model.d: - planned_device_power = [model.power[d, j].value for j in model.j] + planned_device_power = [ + model.device_power_down[d, j].value + model.device_power_up[d, j].value + for j in model.j + ] planned_power_per_device.append( pd.Series( index=pd.date_range( From 3c9d6bf92b7aa19771db29029a4425b4a55383f1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sat, 1 Jan 2022 16:32:02 +0100 Subject: [PATCH 59/63] Relax tests using some tolerance Signed-off-by: F.N. 
Claessen --- .../data/models/planning/tests/test_solver.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 2d80b414b..28868a9bb 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -11,6 +11,9 @@ from flexmeasures.utils.time_utils import as_server_time +TOLERANCE = 0.00001 + + def test_battery_solver_day_1(add_battery_assets): epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() @@ -26,7 +29,9 @@ def test_battery_solver_day_1(add_battery_assets): print(soc_schedule) # Check if constraints were met - assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 + assert ( + min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 - TOLERANCE + ) assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") for soc in soc_schedule.values: assert soc >= battery.get_attribute("min_soc_in_mwh") @@ -64,7 +69,7 @@ def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): # Check if constraints were met assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 - assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") + assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") + TOLERANCE for soc in soc_schedule.values: assert soc >= battery.get_attribute("min_soc_in_mwh") assert soc <= battery.get_attribute("max_soc_in_mwh") @@ -135,12 +140,13 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): min(consumption_schedule.values) >= charging_station.get_attribute("capacity_in_mw") * -1 ) - assert max(consumption_schedule.values) <= charging_station.get_attribute( - "capacity_in_mw" + assert ( + max(consumption_schedule.values) + 
<= charging_station.get_attribute("capacity_in_mw") + TOLERANCE ) print(consumption_schedule.head(12)) print(soc_schedule.head(12)) - assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < 0.00001 + assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < TOLERANCE @pytest.mark.parametrize( @@ -197,5 +203,5 @@ def test_fallback_to_unsolvable_problem(target_soc, charging_station_name): print(soc_schedule.head(12)) assert ( abs(abs(soc_schedule.loc[target_soc_datetime] - target_soc) - expected_gap) - < 0.00001 + < TOLERANCE ) From ba12570131532015a47118b847147bcc2496348c Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 2 Jan 2022 14:25:41 +0100 Subject: [PATCH 60/63] Fix mistake Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/solver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index 057c08bbf..e102adc8c 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -266,7 +266,7 @@ def device_derivative_equalities(m, d, j): return ( 0, m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] - + m.device_power_down[d, j] * m.device_derivative_up_efficiency[d, j] + + m.device_power_down[d, j] * m.device_derivative_down_efficiency[d, j] - m.ems_power[d, j], 0, ) From b3cea6f86f10886300117718fa32108d337d18e5 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 2 Jan 2022 14:52:27 +0100 Subject: [PATCH 61/63] Add test docstring Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/planning/tests/test_solver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 28868a9bb..a979d743d 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -47,6 +47,16 @@ def test_battery_solver_day_1(add_battery_assets): ], ) def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): + """Check battery scheduling results for day 2, which is set up with + 8 expensive, then 8 cheap, then again 8 expensive hours. + If efficiency losses aren't too bad, we expect the scheduler to: + - completely discharge within the first 8 hours + - completely charge within the next 8 hours + - completely discharge within the last 8 hours + If efficiency losses are bad, the price difference is not worth cycling the battery, + and so we expect the scheduler to only: + - completely discharge within the last 8 hours + """ epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da From a95936f2e00d78e1344d9ebc7d093f1da712366a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 2 Jan 2022 14:59:52 +0100 Subject: [PATCH 62/63] Check round-trip efficiency for acceptable range Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/planning/battery.py | 2 ++ flexmeasures/data/models/planning/charging_station.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index 06c9d406d..41798122f 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -42,6 +42,8 @@ def schedule_battery( if roundtrip_efficiency is None: # Get default from sensor, or use 100% otherwise roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + if roundtrip_efficiency <= 0 or roundtrip_efficiency > 1: + raise ValueError("roundtrip_efficiency expected within the interval (0, 1]") # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index a5d1a56b8..93de81ac8 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -37,6 +37,8 @@ def schedule_charging_station( if roundtrip_efficiency is None: # Get default from sensor, or use 100% otherwise roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + if roundtrip_efficiency <= 0 or roundtrip_efficiency > 1: + raise ValueError("roundtrip_efficiency expected within the interval (0, 1]") # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( From 527d92603957f4d36b596f9bdb15e3ee50788d85 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 3 Jan 2022 13:51:50 +0100 Subject: [PATCH 63/63] Expand docstring Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/planning/solver.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index e102adc8c..a473553f3 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -32,7 +32,10 @@ def device_scheduler( # noqa C901 commitment_downwards_deviation_price: Union[List[pd.Series], List[float]], commitment_upwards_deviation_price: Union[List[pd.Series], List[float]], ) -> Tuple[List[pd.Series], float, SolverResults]: - """Schedule devices given constraints on a device and EMS level, and given a list of commitments by the EMS. + """This generic device scheduler is able to handle an EMS with multiple devices, + with various types of constraints on the EMS level and on the device level, + and with multiple market commitments on the EMS level. + A typical example is a house with many devices. The commitments are assumed to be with regard to the flow of energy to the device (positive for consumption, negative for production). The solver minimises the costs of deviating from the commitments.