From 58698ed31698be4f138dd96d9cd4e5db3de8f9b5 Mon Sep 17 00:00:00 2001 From: "create-issue-branch[bot]" <53036503+create-issue-branch[bot]@users.noreply.github.com> Date: Thu, 25 Nov 2021 15:08:56 +0100 Subject: [PATCH 01/46] Move metadata from asset/market/weather classes and types to generic asset (types) (#240) Refactoring: - Renamed generic_asset variables to old_sensor wherever the variable was expecting a type Union[Asset, Market, WeatherSensor]. - Forecasting sub-package gets its attributes from GenericAsset or Sensor instead of from old sensor models. - Planning/scheduling sub-package gets its attributes from GenericAsset or Sensor instead of from old sensor models. - API package gets its attributes from GenericAsset or Sensor instead of from old sensor models. - API package gets and sets metadata on GenericAssets #243 Moving over old data: - Sensor units, event resolutions and knowledge horizons (fnc and par) copied from old sensors (these already had dedicated columns in both the old and new models) in a database migration. One-way transition. - Copy over all old sensor attributes that have no dedicated column in the GenericAsset model (just the display name for Market and WeatherSensor, but a lot of other attributes for Assets). These will be put in the new GenericAsset.attributes column as JSON, or in the new Sensor.attributes column as JSON. - Copy over all old sensor type attributes that have no dedicated column in the GenericAsset model (just seasonalities for Market and WeatherSensor, plus some is_consumer- and can_curtail-like attributes for Assets). Note that I'm moving these to the GenericAsset or Sensor, rather than to the GenericAssetType model. - Copy display name to both GenericAsset and Sensor. Move over crud functionality / make tests work: - Make sure to copy over units, event resolutions, knowledge horizons and other attributes (incl. weather correlations), whenever an Asset gets created, to the simultaneously created new sensor and generic asset. - Do this for Markets and WeatherSensors, too. - API package gets and sets metadata on GenericAssets #243 - Allow JSON fields to be updated (see here). API package gets and sets metadata on GenericAssets #243 * Create draft PR for #239 * Db migration that copies over attributes from old data models * - In Asset.__init__, copy over attributes to GenericAsset. - Start having our model_spec_factory get the attributes it needs from GenericAsset. - Rename variables: specifically, variables that were annotated as a union of our old sensor models were named generic_asset, which was too easily confused with instances of our GenericAsset class. 
* model_spec_factory now gets its attributes from GenericAsset instead of old sensor model types * More renaming to avoid confusion * Have db migration copy over sensor attributes: unit, event_resolution and knowledge horizons * In Asset.__init__, copy over sensor attributes: unit, event_resolution and knowledge horizons * model_spec_factory now gets event_resolution and name from Sensor * Fix tests * Factor out use of corresponding_generic_asset attribute * Factor out use of corresponding_generic_asset attribute * More renaming * Pass time series class to model configurator explicitly * Finally, model_spec_factory doesn't need the old sensor model anymore * Allow setting the collect function name for TBSeriesSpecs to something custom * In Asset.__init__, copy over additional asset attributes to GenericAsset * Planning subpackage uses sensors instead of assets * Move some simple attributes in the UI package * Refactor to stop explicitly passing the market to the scheduler, and instead have the scheduler check for an applicable market * Revert "Move some simple attributes in the UI package", because this needs to be done jointly with moving over asset crud (which we test for) This reverts commit 56ff279cc19ce58a2ab3c56224bae5226c9fbd9c. * Add notes about how each attribute is to be copied from an old class to a new class * Intend to copy display_name to both GenericAsset and Sensor * Introduce Sensor attributes and copy most old model attributes there instead of to GenericAsset attributes * Adjust attribute copying in Asset.__init__ * Implement Sensor method to get an attribute * Give old sensor classes a generic_asset property * Give old sensor classes a get_attribute method * Derive Sensor class lat/lng location from GenericAsset * Get attributes from Sensor rather than from GenericAsset * Set default attributes on generic assets, too * Add clarity to method docstring Co-authored-by: Flix6x Co-authored-by: F.N. 
Claessen --- flexmeasures/api/v1/implementations.py | 2 +- .../api/v1/tests/test_api_v1_fresh_db.py | 2 +- flexmeasures/api/v1_1/tests/test_api_v1_1.py | 2 +- flexmeasures/api/v1_2/implementations.py | 15 +- .../tests/test_api_v2_0_sensors_fresh_db.py | 2 +- flexmeasures/cli/data_add.py | 6 +- flexmeasures/cli/testing.py | 20 +- ...es_from_old_data_models_to_GenericAsset.py | 366 ++++++++++++++++++ flexmeasures/data/models/assets.py | 119 +++++- .../data/models/forecasting/__init__.py | 2 +- .../models/forecasting/model_spec_factory.py | 128 +++--- flexmeasures/data/models/forecasting/utils.py | 37 +- flexmeasures/data/models/generic_assets.py | 9 +- flexmeasures/data/models/markets.py | 20 +- flexmeasures/data/models/planning/battery.py | 24 +- .../data/models/planning/charging_station.py | 18 +- .../data/models/planning/exceptions.py | 4 + .../data/models/planning/tests/test_solver.py | 60 +-- flexmeasures/data/models/planning/utils.py | 19 +- flexmeasures/data/models/time_series.py | 42 +- flexmeasures/data/models/utils.py | 31 +- flexmeasures/data/models/weather.py | 20 +- flexmeasures/data/queries/utils.py | 12 +- flexmeasures/data/scripts/data_gen.py | 134 +++---- flexmeasures/data/services/forecasting.py | 74 ++-- flexmeasures/data/services/resources.py | 4 +- flexmeasures/data/services/scheduling.py | 27 +- flexmeasures/data/services/time_series.py | 28 +- .../data/tests/test_forecasting_jobs.py | 10 +- .../tests/test_forecasting_jobs_fresh_db.py | 6 +- .../data/tests/test_scheduling_jobs.py | 5 +- .../tests/test_scheduling_jobs_fresh_db.py | 7 +- 32 files changed, 907 insertions(+), 348 deletions(-) create mode 100644 flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 9203161b3..51c0df6cb 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -201,7 +201,7 @@ def collect_connection_and_value_groups( # Get the power values # TODO: fill NaN for non-existing values power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect( - generic_asset_names=asset_names, + old_sensor_names=asset_names, query_window=(start, end), resolution=resolution, belief_horizon_window=belief_horizon_window, diff --git a/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py b/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py index c7441123d..e92749927 100644 --- a/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py +++ b/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py @@ -73,7 +73,7 @@ def test_post_and_get_meter_data( for asset_name in ("CS 1", "CS 2", "CS 3"): if asset_name in str(post_message): asset = Asset.query.filter_by(name=asset_name).one_or_none() - assert asset.id in [job.kwargs["asset_id"] for job in jobs] + assert asset.id in [job.kwargs["old_sensor_id"] for job in jobs] # get meter data get_meter_data_response = client.get( diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1.py b/flexmeasures/api/v1_1/tests/test_api_v1_1.py index 0504664fe..6d2fbd9cf 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1.py @@ -155,7 +155,7 @@ def test_post_price_data(setup_api_test_data, db, app, clean_redis, post_message assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon assert job.kwargs["timed_value_type"] == "Price" - assert job.kwargs["asset_id"] == market.id + assert job.kwargs["old_sensor_id"] 
== market.id @pytest.mark.parametrize( diff --git a/flexmeasures/api/v1_2/implementations.py b/flexmeasures/api/v1_2/implementations.py index ac674f935..85cc03bb9 100644 --- a/flexmeasures/api/v1_2/implementations.py +++ b/flexmeasures/api/v1_2/implementations.py @@ -34,7 +34,10 @@ from flexmeasures.data.config import db from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.planning.battery import schedule_battery -from flexmeasures.data.models.planning.exceptions import UnknownPricesException +from flexmeasures.data.models.planning.exceptions import ( + UnknownMarketException, + UnknownPricesException, +) from flexmeasures.data.services.resources import has_assets, can_access_asset @@ -87,16 +90,10 @@ def get_device_message_response(generic_asset_name_groups, duration): start = asset.soc_datetime resolution = asset.event_resolution - # Look for the Market object - market = asset.market - if market is None: - return invalid_market() - # Schedule the asset try: schedule = schedule_battery( - asset, - market, + asset.corresponding_sensor, start, start + planning_horizon, resolution, @@ -105,6 +102,8 @@ def get_device_message_response(generic_asset_name_groups, duration): ) except UnknownPricesException: return unknown_prices() + except UnknownMarketException: + return invalid_market() else: # Update the planning window start = schedule.index[0] diff --git a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py index c87a6cda6..4b7adcbf4 100644 --- a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py +++ b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py @@ -60,4 +60,4 @@ def test_post_price_data_2_0( assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon assert job.kwargs["timed_value_type"] == "Price" - assert job.kwargs["asset_id"] == market.id + assert job.kwargs["old_sensor_id"] == market.id diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 01501fa6b..649c2e9b5 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -619,7 +619,7 @@ def create_forecasts( # Note that this time period refers to the period of events we are forecasting, while in create_forecasting_jobs # the time period refers to the period of belief_times, therefore we are subtracting the horizon. 
create_forecasting_jobs( - asset_id=asset_id, + old_sensor_id=asset_id, timed_value_type=value_type, horizons=[horizon], start_of_roll=forecast_start - horizon, @@ -634,8 +634,8 @@ def create_forecasts( forecast_start=forecast_start, forecast_end=forecast_end, event_resolution=event_resolution, - generic_asset_type=asset_type, - generic_asset_id=asset_id, + old_sensor_class_name=asset_type, + old_sensor_id=asset_id, ) diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index 72011a011..f9ef8bdc5 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -16,6 +16,7 @@ from flexmeasures.data.models.markets import Market from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.models.forecasting import lookup_model_specs_configurator +from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor from flexmeasures.utils.time_utils import as_server_time from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -52,7 +53,7 @@ def test_making_forecasts(): click.echo("Forecasts found before : %d" % forecast_filter.count()) create_forecasting_jobs( - asset_id=asset_id, + old_sensor_id=asset_id, timed_value_type="Power", horizons=[timedelta(hours=6)], start_of_roll=as_server_time(datetime(2015, 4, 1)), @@ -79,7 +80,7 @@ def test_making_forecasts(): # un-comment to use as CLI function # @app.cli.command() @click.option("--asset-type", help="Asset type name.") -@click.option("--asset", help="Asset name.") +@click.option("--asset", "asset_name", help="Asset name.") @click.option( "--from_date", default="2015-03-10", @@ -92,7 +93,7 @@ def test_making_forecasts(): ) def test_generic_model( asset_type: str, - asset: Optional[str] = None, + asset_name: Optional[str] = None, from_date: str = "2015-03-10", period: int = 3, horizon_hours: int = 1, @@ -101,10 +102,8 @@ def test_generic_model( """Manually test integration of timetomodel for our generic model.""" asset_type_name = asset_type - if asset is None: + if asset_name is None: asset_name = Asset.query.filter_by(asset_type_name=asset_type_name).first().name - else: - asset_name = asset start = as_server_time(datetime.strptime(from_date, "%Y-%m-%d")) end = start + timedelta(days=period) training_and_testing_period = timedelta(days=training) @@ -127,11 +126,11 @@ def test_generic_model( .first() ) if asset: - generic_asset = asset + old_sensor = asset elif market: - generic_asset = market + old_sensor = market elif sensor: - generic_asset = sensor + old_sensor = sensor else: click.echo("No such assets in db, so I will not add any forecasts.") return @@ -142,7 +141,8 @@ def test_generic_model( model_identifier, fallback_model_identifier, ) = linear_model_configurator( - generic_asset=generic_asset, + sensor=old_sensor.corresponding_sensor, + time_series_class=determine_old_time_series_class_by_old_sensor(old_sensor), forecast_start=start, forecast_end=end, forecast_horizon=horizon, diff --git a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py new file mode 100644 index 000000000..d8128ff3e --- /dev/null +++ b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py @@ -0,0 +1,366 @@ +"""Copy attributes from old data models to GenericAsset + +Revision ID: 6cf5b241b85f +Revises: 1ae32ffc8c3f +Create Date: 2021-11-11 17:18:15.395915 + 
+""" +import json +from datetime import datetime + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "6cf5b241b85f" +down_revision = "1ae32ffc8c3f" +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column( + "generic_asset", sa.Column("attributes", sa.JSON(), nullable=True, default="{}") + ) + op.add_column( + "sensor", sa.Column("attributes", sa.JSON(), nullable=True, default="{}") + ) + + """ + - For each OldModel (Market/WeatherSensor/Asset), get the Sensor with the same id as the OldModel, + and then get the GenericAsset of that Sensor. + - Add the OldModel's display name to the corresponding GenericAsset's attributes, + and other attributes we want to copy. + - Find the OldModelType (MarketType/WeatherSensorType/AssetType) of the OldModel, + and copy its seasonalities to the GenericAsset's attributes. + """ + # todo: find places where we look for seasonality and get it from the corresponding GenericAsset instead + # todo: find places where we look for old_model_type and get it from the corresponding GenericAsset instead + + # Declare ORM table views + t_generic_asset = sa.Table( + "generic_asset", + sa.MetaData(), + sa.Column("id"), + sa.Column("attributes"), + ) + t_sensor = sa.Table( + "sensor", + sa.MetaData(), + sa.Column("id"), + sa.Column("attributes"), + sa.Column("generic_asset_id"), + sa.Column("unit"), + sa.Column("event_resolution"), + sa.Column("knowledge_horizon_fnc"), + sa.Column("knowledge_horizon_par"), + ) + t_market = sa.Table( + "market", + sa.MetaData(), + sa.Column("id", sa.Integer), + sa.Column("market_type_name", sa.String(80)), + sa.Column( + "display_name", sa.String(80) + ), # Copy to both Sensor and to GenericAsset + sa.Column("unit"), # Copy to Sensor [done] + sa.Column("event_resolution"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_par"), # Copy to Sensor [done] + ) + t_market_type = sa.Table( + "market_type", + sa.MetaData(), + sa.Column("name", sa.String(80)), + sa.Column("daily_seasonality", sa.Boolean), # Copy to Sensor + sa.Column("weekly_seasonality", sa.Boolean), # Copy to Sensor + sa.Column("yearly_seasonality", sa.Boolean), # Copy to Sensor + ) + t_asset = sa.Table( + "asset", + sa.MetaData(), + sa.Column("id"), + sa.Column("asset_type_name"), + sa.Column("display_name"), # Copy to both Sensor and to GenericAsset + sa.Column("capacity_in_mw"), # Copy to Sensor + sa.Column("min_soc_in_mwh"), # Copy to GenericAsset [1] + sa.Column("max_soc_in_mwh"), # Copy to GenericAsset [1] + sa.Column("soc_in_mwh"), # Copy to GenericAsset [1] + sa.Column("soc_datetime"), # Copy to GenericAsset [1] + sa.Column("soc_udi_event_id"), # Copy to GenericAsset [2] + sa.Column("market_id"), # Copy to Sensor [3] + sa.Column("unit"), # Copy to Sensor [done] + sa.Column("event_resolution"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_par"), # Copy to Sensor [done] + ) + # [1] will be moved to a separate sensor later + # [2] deprecated in favour of Redis job id since api v1.3 + # [3] will be deprecated in favour of something like a weighed by relationship (could be multiple) + t_asset_type = sa.Table( + "asset_type", + sa.MetaData(), + sa.Column("name", sa.String(80)), + sa.Column("is_consumer"), # Copy to Sensor + sa.Column("is_producer"), # Copy to Sensor + sa.Column("can_curtail"), # Copy to GenericAsset [4] + sa.Column("can_shift"), # Copy to 
GenericAsset [4] + sa.Column("daily_seasonality", sa.Boolean), # Copy to Sensor + sa.Column("weekly_seasonality", sa.Boolean), # Copy to Sensor + sa.Column("yearly_seasonality", sa.Boolean), # Copy to Sensor + ) + # [4] will be deprecated in favour of actuator functionality + t_weather_sensor = sa.Table( + "weather_sensor", + sa.MetaData(), + sa.Column("id"), + sa.Column("weather_sensor_type_name"), + sa.Column("display_name"), # Copy to both Sensor and to GenericAsset + sa.Column("unit"), # Copy to Sensor [done] + sa.Column("event_resolution"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done] + sa.Column("knowledge_horizon_par"), # Copy to Sensor [done] + ) + t_weather_sensor_type = sa.Table( + "weather_sensor_type", + sa.MetaData(), + sa.Column("name", sa.String(80)), + ) + + # Use SQLAlchemy's connection and transaction to go through the data + connection = op.get_bind() + + # Set default attributes + connection.execute( + t_sensor.update().values( + attributes=json.dumps({}), + ) + ) + connection.execute( + t_generic_asset.update().values( + attributes=json.dumps({}), + ) + ) + + copy_attributes( + connection, + t_market, + t_sensor, + t_target=t_sensor, + t_old_model_type=t_market_type, + old_model_attributes=["id", "market_type_name", "display_name"], + old_model_type_attributes=[ + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + ], + ) + copy_attributes( + connection, + t_market, + t_sensor, + t_target=t_generic_asset, + t_old_model_type=t_market_type, + old_model_attributes=["id", "market_type_name", "display_name"], + ) + copy_attributes( + connection, + t_weather_sensor, + t_sensor, + t_target=t_sensor, + t_old_model_type=t_weather_sensor_type, + old_model_attributes=["id", "weather_sensor_type_name", "display_name"], + extra_attributes={ + "daily_seasonality": True, + "weekly_seasonality": False, + "yearly_seasonality": True, + }, # The WeatherSensor table had these hardcoded (d, w, y) seasonalities + ) + copy_attributes( + connection, + t_weather_sensor, + t_sensor, + t_target=t_generic_asset, + t_old_model_type=t_weather_sensor_type, + old_model_attributes=["id", "weather_sensor_type_name", "display_name"], + ) + copy_attributes( + connection, + t_asset, + t_sensor, + t_target=t_sensor, + t_old_model_type=t_asset_type, + old_model_attributes=[ + "id", + "asset_type_name", + "display_name", + "capacity_in_mw", + "market_id", + ], + old_model_type_attributes=[ + "is_consumer", + "is_producer", + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + ], + ) + copy_attributes( + connection, + t_asset, + t_sensor, + t_target=t_generic_asset, + t_old_model_type=t_asset_type, + old_model_attributes=[ + "id", + "asset_type_name", + "display_name", + "min_soc_in_mwh", + "max_soc_in_mwh", + "soc_in_mwh", + "soc_datetime", + "soc_udi_event_id", + ], + old_model_type_attributes=[ + "can_curtail", + "can_shift", + ], + ) + op.alter_column( + "sensor", + "attributes", + nullable=False, + ) + op.alter_column( + "generic_asset", + "attributes", + nullable=False, + ) + copy_sensor_columns(connection, t_market, t_sensor) + copy_sensor_columns(connection, t_weather_sensor, t_sensor) + copy_sensor_columns(connection, t_asset, t_sensor) + + +def downgrade(): + op.drop_column("sensor", "attributes") + op.drop_column("generic_asset", "attributes") + + +def copy_sensor_columns(connection, t_old_model, t_sensor): + old_model_attributes = [ + "id", + "unit", + "event_resolution", + "knowledge_horizon_fnc", + 
"knowledge_horizon_par", + ] + + # Get columns from old model + results = connection.execute( + sa.select([getattr(t_old_model.c, a) for a in old_model_attributes]) + ).fetchall() + + for sensor_id, *args in results: + + # Obtain columns we want to copy over, from the old model + old_model_columns_to_copy = { + k: v if not isinstance(v, dict) else json.dumps(v) + for k, v in zip(old_model_attributes[-len(args) :], args) + } + + # Fill in the Sensor's columns + connection.execute( + t_sensor.update() + .where(t_sensor.c.id == sensor_id) + .values( + **old_model_columns_to_copy, + ) + ) + + +def copy_attributes( + connection, + t_old_model, + t_sensor, + t_target, + t_old_model_type, + old_model_attributes, + old_model_type_attributes=[], + extra_attributes={}, +): + """ + + :param old_model_attributes: first two attributes should be id and old_model_type_name, then any other columns we want to copy over from the old model + :param old_model_type_attributes: columns we want to copy over from the old model type + :param extra_attributes: any additional attributes we want to set + """ + # Get attributes from old model + results = connection.execute( + sa.select([getattr(t_old_model.c, a) for a in old_model_attributes]) + ).fetchall() + + for id, type_name, *args in results: + + # Obtain attributes we want to copy over, from the old model + old_model_attributes_to_copy = { + k: v if not isinstance(v, datetime) else v.isoformat() + for k, v in zip(old_model_attributes[-len(args) :], args) + } + + # Obtain seasonality attributes we want to copy over, from the old model type + old_model_type_attributes_to_copy = get_old_model_type_attributes( + connection, + type_name, + t_old_model_type, + old_model_type_attributes=old_model_type_attributes, + ) + + # Find out where to copy over the attributes + if t_target.name == "generic_asset": + target_id = get_generic_asset_id(connection, id, t_sensor) + elif t_target.name == "sensor": + target_id = id + else: + raise ValueError + + # Fill in the target class's attributes + connection.execute( + t_target.update() + .where(t_target.c.id == target_id) + .values( + attributes=json.dumps( + { + **old_model_attributes_to_copy, + **old_model_type_attributes_to_copy, + **extra_attributes, + } + ) + ) + ) + + +def get_generic_asset_id(connection, old_model_id: int, t_sensors) -> int: + """Get the Sensor with the same id as the OldModel, and then get the id of the GenericAsset of that Sensor.""" + (generic_asset_id,) = connection.execute( + sa.select( + [ + t_sensors.c.generic_asset_id, + ] + ).filter(t_sensors.c.id == old_model_id) + ).one_or_none() + assert generic_asset_id is not None + return generic_asset_id + + +def get_old_model_type_attributes( + connection, old_model_type_name, t_old_model_types, old_model_type_attributes +) -> dict: + """Get the attributes from the OldModelType.""" + values = connection.execute( + sa.select( + [getattr(t_old_model_types.c, a) for a in old_model_type_attributes] + ).filter(t_old_model_types.c.name == old_model_type_name) + ).one_or_none() + assert values is not None + return {k: v for k, v in zip(old_model_type_attributes, values)} diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 214825c61..2cbd9eaac 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -10,6 +10,7 @@ from flexmeasures.data.models.time_series import Sensor, TimedValue from flexmeasures.data.models.generic_assets import ( create_generic_asset, + GenericAsset, GenericAssetType, ) 
from flexmeasures.utils.entity_address_utils import build_entity_address @@ -110,25 +111,105 @@ class Asset(db.Model, tb.SensorDBMixin): def __init__(self, **kwargs): + if "unit" not in kwargs: + kwargs["unit"] = "MW" # current default + super(Asset, self).__init__(**kwargs) + # Create a new Sensor with unique id across assets, markets and weather sensors - # Also keep track of ownership. + # Also keep track of ownership by creating a GenericAsset and assigning the new Sensor to it. if "id" not in kwargs: + + # Set up generic asset generic_assets_arg = kwargs.copy() + if "asset_type_name" in generic_assets_arg: + asset_type = db.session.query(AssetType).get( + generic_assets_arg["asset_type_name"] + ) + else: + asset_type = generic_assets_arg["asset_type"] + asset_type_attributes_for_generic_asset = [ + "can_curtail", + "can_shift", + ] + asset_attributes_for_generic_asset = [ + "display_name", + "min_soc_in_mwh", + "max_soc_in_mwh", + "soc_in_mwh", + "soc_datetime", + "soc_udi_event_id", + ] + generic_asset_attributes_from_asset_type = { + a: getattr(asset_type, a) + for a in asset_type_attributes_for_generic_asset + } + generic_asset_attributes_from_asset = { + a: getattr(self, a) + if not isinstance(getattr(self, a), datetime) + else getattr(self, a).isoformat() + for a in asset_attributes_for_generic_asset + } + generic_assets_arg = { + **generic_assets_arg, + **{ + "attributes": { + **generic_asset_attributes_from_asset_type, + **generic_asset_attributes_from_asset, + }, + }, + } + if "owner_id" in kwargs: owner = User.query.get(kwargs["owner_id"]) if owner: generic_assets_arg.update(account_id=owner.account_id) new_generic_asset = create_generic_asset("asset", **generic_assets_arg) - new_sensor = Sensor(name=kwargs["name"], generic_asset=new_generic_asset) + + # Set up sensor + sensor_kwargs = dict( + name=kwargs["name"], + generic_asset=new_generic_asset, + ) + asset_type_attributes_for_sensor = [ + "is_consumer", + "is_producer", + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + "weather_correlations", + ] + asset_attributes_for_sensor = [ + "display_name", + "capacity_in_mw", + "market_id", + ] + sensor_attributes_from_asset_type = { + a: getattr(asset_type, a) for a in asset_type_attributes_for_sensor + } + sensor_attributes_from_asset = { + a: getattr(self, a) + if not isinstance(getattr(self, a), datetime) + else getattr(self, a).isoformat() + for a in asset_attributes_for_sensor + } + sensor_kwargs = { + **sensor_kwargs, + **{ + "attributes": { + **sensor_attributes_from_asset_type, + **sensor_attributes_from_asset, + }, + }, + } + new_sensor = Sensor( + **sensor_kwargs, + ) db.session.add(new_sensor) db.session.flush() # generates the pkey for new_sensor sensor_id = new_sensor.id else: # The UI may initialize Asset objects from API form data with a known id sensor_id = kwargs["id"] - if "unit" not in kwargs: - kwargs["unit"] = "MW" # current default - super(Asset, self).__init__(**kwargs) self.id = sensor_id if self.unit != "MW": raise Exception("FlexMeasures only supports MW as unit for now.") @@ -136,6 +217,15 @@ def __init__(self, **kwargs): if "display_name" not in kwargs: self.display_name = humanize(self.name) + # Copy over additional columns from (newly created) Asset to (newly created) Sensor + if "id" not in kwargs: + db.session.add(self) + db.session.flush() # make sure to generate each column for the old sensor + new_sensor.unit = self.unit + new_sensor.event_resolution = self.event_resolution + new_sensor.knowledge_horizon_fnc = 
self.knowledge_horizon_fnc + new_sensor.knowledge_horizon_par = self.knowledge_horizon_par + asset_type = db.relationship("AssetType", backref=db.backref("assets", lazy=True)) owner = db.relationship( "User", @@ -159,6 +249,23 @@ def latest_state(self, event_ends_before: Optional[datetime] = None) -> "Power": ) return power_query.first() + @property + def corresponding_sensor(self) -> Sensor: + return db.session.query(Sensor).get(self.id) + + @property + def generic_asset(self) -> GenericAsset: + return db.session.query(GenericAsset).get(self.corresponding_sensor.id) + + def get_attribute(self, attribute: str): + """Looks for the attribute on the corresponding Sensor. + + This should be used by all code to read these attributes, + rather than accessing them directly on this class, + as this table is in the process of being replaced by the Sensor table. + """ + return self.corresponding_sensor.get_attribute(attribute) + @property def power_unit(self) -> float: """Return the 'unit' property of the generic asset, just with a more insightful name.""" @@ -257,7 +364,7 @@ def make_query( **kwargs, ) -> Query: """Construct the database query.""" - return super().make_query(asset_class=Asset, **kwargs) + return super().make_query(old_sensor_class=Asset, **kwargs) def to_dict(self): return { diff --git a/flexmeasures/data/models/forecasting/__init__.py b/flexmeasures/data/models/forecasting/__init__.py index e16317205..faba9c160 100644 --- a/flexmeasures/data/models/forecasting/__init__.py +++ b/flexmeasures/data/models/forecasting/__init__.py @@ -42,7 +42,7 @@ def lookup_model_specs_configurator( Model meta data in this context means a tuple of: * timetomodel.ModelSpecs. To fill in those specs, a configurator should accept: - - generic_asset: Union[Asset, Market, WeatherSensor], + - old_sensor: Union[Asset, Market, WeatherSensor], - start: datetime, # Start of forecast period - end: datetime, # End of forecast period - horizon: timedelta, # Duration between time of forecasting and time which is forecast diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 5a01c2898..13f0ac3bb 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union from datetime import datetime, timedelta, tzinfo from pprint import pformat import logging @@ -19,13 +19,10 @@ ) import pandas as pd -from flexmeasures.data.models.assets import AssetType, Asset -from flexmeasures.data.models.markets import MarketType, Market -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather -from flexmeasures.data.models.utils import ( - determine_asset_type_by_asset, - determine_asset_value_class_by_asset, -) +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.markets import Price +from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.weather import Weather, WeatherSensor from flexmeasures.data.models.forecasting.utils import ( create_lags, set_training_and_testing_dates, @@ -46,19 +43,20 @@ class TBSeriesSpecs(SeriesSpecs): """Compatibility for using timetomodel.SeriesSpecs with timely_beliefs.BeliefsDataFrames. 
- This implements _load_series such that TimedValue.collect is called on the generic asset class, + This implements _load_series such that .collect is called, with the parameters in collect_params. The collect function is expected to return a BeliefsDataFrame. """ - generic_asset_value_class: Any # with collect method + time_series_class: Any # with method (named "collect" by default) collect_params: dict def __init__( self, - generic_asset_value_class, + time_series_class, collect_params: dict, name: str, + collect_fnc: str = "collect", original_tz: Optional[tzinfo] = pytz.utc, # postgres stores naive datetimes feature_transformation: Optional[ReversibleTransformation] = None, post_load_processing: Optional[Transformation] = None, @@ -73,15 +71,14 @@ def __init__( resampling_config, interpolation_config, ) - self.generic_asset_value_class = generic_asset_value_class + self.time_series_class = time_series_class self.collect_params = collect_params + self.collect_fnc = collect_fnc def _load_series(self) -> pd.Series: - logger.info( - "Reading %s data from database" % self.generic_asset_value_class.__name__ - ) + logger.info("Reading %s data from database" % self.time_series_class.__name__) - bdf: BeliefsDataFrame = self.generic_asset_value_class.collect( + bdf: BeliefsDataFrame = getattr(self.time_series_class, self.collect_fnc)( **self.collect_params ) assert isinstance(bdf, BeliefsDataFrame) @@ -101,7 +98,7 @@ def check_data(self, df: pd.DataFrame): "No values found in database for the requested %s data. It's no use to continue I'm afraid." " Here's a print-out of what I tried to collect:\n\n%s\n\n" % ( - self.generic_asset_value_class.__name__, + self.time_series_class.__name__, pformat(self.collect_params, sort_dicts=False), ) ) @@ -110,14 +107,15 @@ def check_data(self, df: pd.DataFrame): "Nan values found in database for the requested %s data. It's no use to continue I'm afraid." " Here's a print-out of what I tried to collect:\n\n%s\n\n" % ( - self.generic_asset_value_class.__name__, + self.time_series_class.__name__, pformat(self.collect_params, sort_dicts=False), ) ) def create_initial_model_specs( # noqa: C901 - generic_asset: Union[Asset, Market, WeatherSensor], + sensor: Sensor, + time_series_class: Type[Union[Power, Price, Weather]], forecast_start: datetime, # Start of forecast period forecast_end: datetime, # End of forecast period forecast_horizon: timedelta, # Duration between time of forecasting and end time of the event that is forecast @@ -138,17 +136,14 @@ def create_initial_model_specs( # noqa: C901 calendar day. 
""" - generic_asset_type = determine_asset_type_by_asset(generic_asset) - generic_asset_value_class = determine_asset_value_class_by_asset(generic_asset) - params = _parameterise_forecasting_by_asset_and_asset_type( - generic_asset, generic_asset_type, transform_to_normal + sensor, transform_to_normal ) params.update(custom_model_params if custom_model_params is not None else {}) lags = create_lags( params["n_lags"], - generic_asset_type, + sensor, forecast_horizon, params["resolution"], use_periodicity, @@ -168,8 +163,7 @@ def create_initial_model_specs( # noqa: C901 "regressor_transformation", {} ) regressor_specs = configure_regressors_for_nearest_weather_sensor( - generic_asset, - generic_asset_type, + sensor, query_window, forecast_horizon, regressor_transformation, @@ -180,10 +174,10 @@ def create_initial_model_specs( # noqa: C901 ex_post_horizon = timedelta(hours=0) outcome_var_spec = TBSeriesSpecs( - name=generic_asset_type.name, - generic_asset_value_class=generic_asset_value_class, + name=sensor.generic_asset.generic_asset_type.name, + time_series_class=time_series_class, collect_params=dict( - generic_asset_names=[generic_asset.name], + old_sensor_names=[sensor.name], query_window=query_window, belief_horizon_window=(None, ex_post_horizon), ), @@ -192,7 +186,7 @@ def create_initial_model_specs( # noqa: C901 ) # Set defaults if needed if params.get("event_resolution", None) is None: - params["event_resolution"] = generic_asset.event_resolution + params["event_resolution"] = sensor.event_resolution if params.get("remodel_frequency", None) is None: params["remodel_frequency"] = timedelta(days=7) specs = ModelSpecs( @@ -214,8 +208,7 @@ def create_initial_model_specs( # noqa: C901 def _parameterise_forecasting_by_asset_and_asset_type( - generic_asset: Union[Asset, Market, WeatherSensor], - generic_asset_type: Union[AssetType, MarketType, WeatherSensorType], + sensor: Sensor, transform_to_normal: bool, ) -> dict: """Fill in the best parameters we know (generic or by asset (type))""" @@ -224,48 +217,44 @@ def _parameterise_forecasting_by_asset_and_asset_type( params["training_and_testing_period"] = timedelta(days=30) params["ratio_training_testing_data"] = 14 / 15 params["n_lags"] = 7 - params["resolution"] = generic_asset.event_resolution + params["resolution"] = sensor.event_resolution if transform_to_normal: params[ "outcome_var_transformation" - ] = get_normalization_transformation_by_asset_type(generic_asset_type) + ] = get_normalization_transformation_from_sensor_attributes(sensor) return params -def get_normalization_transformation_by_asset_type( - generic_asset_type: Union[AssetType, MarketType, WeatherSensorType] +def get_normalization_transformation_from_sensor_attributes( + sensor: Union[Sensor, WeatherSensor], ) -> Optional[Transformation]: """ Transform data to be normal, using the BoxCox transformation. Lambda parameter is chosen - according ot the asset type. + according to the asset type. 
""" - if isinstance(generic_asset_type, AssetType): - if (generic_asset_type.is_consumer and not generic_asset_type.is_producer) or ( - generic_asset_type.is_producer and not generic_asset_type.is_consumer - ): - return BoxCoxTransformation(lambda2=0.1) - else: - return None - elif isinstance(generic_asset_type, MarketType): - return None - elif isinstance(generic_asset_type, WeatherSensorType): - if generic_asset_type.name in ["wind_speed", "radiation"]: - # Values cannot be negative and are often zero - return BoxCoxTransformation(lambda2=0.1) - elif generic_asset_type.name == "temperature": - # Values can be positive or negative when given in degrees Celsius, but non-negative only in Kelvin - return BoxCoxTransformation(lambda2=273.16) - else: - return None + if ( + sensor.get_attribute("is_consumer") and not sensor.get_attribute("is_producer") + ) or ( + sensor.get_attribute("is_producer") and not sensor.get_attribute("is_consumer") + ): + return BoxCoxTransformation(lambda2=0.1) + elif sensor.generic_asset.generic_asset_type.name in [ + "wind_speed", + "radiation", + ]: + # Values cannot be negative and are often zero + return BoxCoxTransformation(lambda2=0.1) + elif sensor.generic_asset.generic_asset_type.name == "temperature": + # Values can be positive or negative when given in degrees Celsius, but non-negative only in Kelvin + return BoxCoxTransformation(lambda2=273.16) else: - raise TypeError("Unknown generic asset type.") + return None def configure_regressors_for_nearest_weather_sensor( - generic_asset, - generic_asset_type, + sensor: Sensor, query_window, horizon, regressor_transformation, # the regressor transformation can be passed in @@ -273,41 +262,38 @@ def configure_regressors_for_nearest_weather_sensor( ) -> List[TBSeriesSpecs]: """For Assets, we use weather data as regressors. Here, we configure them.""" regressor_specs = [] - if isinstance(generic_asset, Asset): - sensor_types = generic_asset_type.weather_correlations + sensor_types = sensor.get_attribute("weather_correlations") + if sensor_types: current_app.logger.info( - "For %s, I need sensors: %s" % (generic_asset, sensor_types) + "For %s, I need sensors: %s" % (sensor.name, sensor_types) ) for sensor_type in sensor_types: # Find nearest weather sensor - closest_sensor = find_closest_weather_sensor( - sensor_type, object=generic_asset - ) + closest_sensor = find_closest_weather_sensor(sensor_type, object=sensor) if closest_sensor is None: current_app.logger.warning( "No sensor found of sensor type %s to use as regressor for %s." - % (sensor_type, generic_asset) + % (sensor_type, sensor.name) ) else: current_app.logger.info( - "Using sensor %s as regressor for %s." - % (sensor_type, generic_asset) + "Using sensor %s as regressor for %s." 
% (sensor_type, sensor.name) ) # Collect the weather data for the requested time window regressor_specs_name = "%s_l0" % sensor_type if len(regressor_transformation.keys()) == 0 and transform_to_normal: regressor_transformation = ( - get_normalization_transformation_by_asset_type( - WeatherSensorType(name=sensor_type) + get_normalization_transformation_from_sensor_attributes( + closest_sensor, ) ) regressor_specs.append( TBSeriesSpecs( name=regressor_specs_name, - generic_asset_value_class=Weather, + time_series_class=Weather, collect_params=dict( - generic_asset_names=[closest_sensor.name], + old_sensor_names=[closest_sensor.name], query_window=query_window, belief_horizon_window=(horizon, None), ), diff --git a/flexmeasures/data/models/forecasting/utils.py b/flexmeasures/data/models/forecasting/utils.py index 895aa9261..e095ff01e 100644 --- a/flexmeasures/data/models/forecasting/utils.py +++ b/flexmeasures/data/models/forecasting/utils.py @@ -1,14 +1,14 @@ from typing import Tuple, List, Union from datetime import datetime, timedelta -from flexmeasures.data.config import db from flexmeasures.data.models.forecasting.exceptions import NotEnoughDataException +from flexmeasures.data.models.time_series import Sensor from flexmeasures.utils.time_utils import as_server_time def check_data_availability( - generic_asset, - generic_asset_value_class, + old_sensor_model, + old_time_series_data_model, forecast_start: datetime, forecast_end: datetime, query_window: Tuple[datetime, datetime], @@ -18,11 +18,11 @@ def check_data_availability( for training window and lagged variables. Otherwise, suggest new forecast period. TODO: we could also check regressor data, if we get regressor specs passed in here. """ - q = generic_asset_value_class.query.join(generic_asset.__class__).filter( - generic_asset.__class__.name == generic_asset.name + q = old_time_series_data_model.query.join(old_sensor_model.__class__).filter( + old_sensor_model.__class__.name == old_sensor_model.name ) - first_value = q.order_by(generic_asset_value_class.datetime.asc()).first() - last_value = q.order_by(generic_asset_value_class.datetime.desc()).first() + first_value = q.order_by(old_time_series_data_model.datetime.asc()).first() + last_value = q.order_by(old_time_series_data_model.datetime.desc()).first() if first_value is None: raise NotEnoughDataException( "No data available at all. Forecasting impossible." @@ -32,26 +32,26 @@ def check_data_availability( if query_window[0] < first: suggested_start = forecast_start + (first - query_window[0]) raise NotEnoughDataException( - f"Not enough data to forecast {generic_asset.name} " + f"Not enough data to forecast {old_sensor_model.name} " f"for the forecast window {as_server_time(forecast_start)} to {as_server_time(forecast_end)}. " f"I needed to query from {as_server_time(query_window[0])}, " - f"but the first value available is from {first} to {first + generic_asset.event_resolution}. " + f"but the first value available is from {first} to {first + old_sensor_model.event_resolution}. " f"Consider setting the start date to {as_server_time(suggested_start)}." 
) - if query_window[1] - horizon > last + generic_asset.event_resolution: + if query_window[1] - horizon > last + old_sensor_model.event_resolution: suggested_end = forecast_end + (last - (query_window[1] - horizon)) raise NotEnoughDataException( - f"Not enough data to forecast {generic_asset.name} " + f"Not enough data to forecast {old_sensor_model.name} " f"for the forecast window {as_server_time(forecast_start)} to {as_server_time(forecast_end)}. " f"I needed to query until {as_server_time(query_window[1] - horizon)}, " - f"but the last value available is from {last} to {last + generic_asset.event_resolution}. " + f"but the last value available is from {last} to {last + old_sensor_model.event_resolution}. " f"Consider setting the end date to {as_server_time(suggested_end)}." ) def create_lags( n_lags: int, - generic_asset_type: db.Model, + sensor: Sensor, horizon: timedelta, resolution: timedelta, use_periodicity: bool, @@ -71,12 +71,11 @@ def create_lags( lags.append((L + number_of_nan_lags) * lag_period) # Include relevant measurements given the asset's periodicity - if use_periodicity and hasattr(generic_asset_type, "daily_seasonality"): - if generic_asset_type.daily_seasonality: - lag_period = timedelta(days=1) - number_of_nan_lags = 1 + (horizon - resolution) // lag_period - for L in range(n_lags): - lags.append((L + number_of_nan_lags) * lag_period) + if use_periodicity and sensor.get_attribute("daily_seasonality"): + lag_period = timedelta(days=1) + number_of_nan_lags = 1 + (horizon - resolution) // lag_period + for L in range(n_lags): + lags.append((L + number_of_nan_lags) * lag_period) # Remove possible double entries return list(set(lags)) diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index b1d791f81..585f00820 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -25,6 +25,7 @@ class GenericAsset(db.Model): name = db.Column(db.String(80), default="") latitude = db.Column(db.Float, nullable=True) longitude = db.Column(db.Float, nullable=True) + attributes = db.Column(db.JSON, nullable=False, default="{}") generic_asset_type_id = db.Column( db.Integer, db.ForeignKey("generic_asset_type.id"), nullable=False @@ -56,6 +57,10 @@ def location(self) -> Optional[Tuple[float, float]]: return self.latitude, self.longitude return None + def get_attribute(self, attribute: str): + if attribute in self.attributes: + return self.attributes[attribute] + def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset: """Create a GenericAsset and assigns it an id. 
@@ -80,7 +85,9 @@ def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset: if generic_asset_type is None: raise ValueError(f"Cannot find GenericAssetType {asset_type_name} in database.") new_generic_asset = GenericAsset( - name=kwargs["name"], generic_asset_type_id=generic_asset_type.id + name=kwargs["name"], + generic_asset_type_id=generic_asset_type.id, + attributes=kwargs["attributes"] if "attributes" in kwargs else {}, ) for arg in ("latitude", "longitude", "account_id"): if arg in kwargs: diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index 5d59e8640..d74a03ea6 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -7,6 +7,7 @@ from flexmeasures.data.config import db from flexmeasures.data.models.generic_assets import ( create_generic_asset, + GenericAsset, GenericAssetType, ) from flexmeasures.data.models.time_series import Sensor, TimedValue @@ -100,6 +101,23 @@ def entity_address(self) -> str: """Entity address under the latest fm scheme for entity addresses.""" return build_entity_address(dict(sensor_id=self.id), "sensor") + @property + def corresponding_sensor(self) -> Sensor: + return db.session.query(Sensor).get(self.id) + + @property + def generic_asset(self) -> GenericAsset: + return db.session.query(GenericAsset).get(self.corresponding_sensor.id) + + def get_attribute(self, attribute: str): + """Looks for the attribute on the corresponding Sensor. + + This should be used by all code to read these attributes, + rather than accessing them directly on this class, + as this table is in the process of being replaced by the Sensor table. + """ + return self.corresponding_sensor.get_attribute(attribute) + @property def price_unit(self) -> str: """Return the 'unit' property of the generic asset, just with a more insightful name.""" @@ -135,7 +153,7 @@ class Price(TimedValue, db.Model): @classmethod def make_query(cls, **kwargs) -> Query: """Construct the database query.""" - return super().make_query(asset_class=Market, **kwargs) + return super().make_query(old_sensor_class=Market, **kwargs) def __init__(self, **kwargs): super(Price, self).__init__(**kwargs) diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index 07a345213..bc725c138 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -3,8 +3,7 @@ import pandas as pd -from flexmeasures.data.models.assets import Asset -from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.planning.solver import device_scheduler from flexmeasures.data.models.planning.utils import ( initialize_df, @@ -15,8 +14,7 @@ def schedule_battery( - asset: Asset, - market: Market, + sensor: Sensor, start: datetime, end: datetime, resolution: timedelta, @@ -31,7 +29,7 @@ def schedule_battery( # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( - market, (start, end), resolution, allow_trimmed_query_window=True + sensor, (start, end), resolution, allow_trimmed_query_window=True ) if soc_targets is not None: # soc targets are at the end of each time slot, while prices are indexed by the start of each time slot @@ -71,14 +69,16 @@ def schedule_battery( ) # shift "equals" constraint for target SOC by one resolution (the target defines a state at a certain time, # while the "equals" constraint defines what the total stock
should be at the end of a time slot, # where the time slot is indexed by its starting time) - device_constraints[0]["min"] = (asset.min_soc_in_mwh - soc_at_start) * ( - timedelta(hours=1) / resolution + device_constraints[0]["min"] = ( + sensor.get_attribute("min_soc_in_mwh") - soc_at_start + ) * (timedelta(hours=1) / resolution) + device_constraints[0]["max"] = ( + sensor.get_attribute("max_soc_in_mwh") - soc_at_start + ) * (timedelta(hours=1) / resolution) + device_constraints[0]["derivative min"] = ( + sensor.get_attribute("capacity_in_mw") * -1 ) - device_constraints[0]["max"] = (asset.max_soc_in_mwh - soc_at_start) * ( - timedelta(hours=1) / resolution - ) - device_constraints[0]["derivative min"] = asset.capacity_in_mw * -1 - device_constraints[0]["derivative max"] = asset.capacity_in_mw + device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 5074cdcc7..6677c8e98 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -3,8 +3,7 @@ from pandas import Series, Timestamp -from flexmeasures.data.models.assets import Asset -from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.planning.solver import device_scheduler from flexmeasures.data.models.planning.utils import ( initialize_df, @@ -15,8 +14,7 @@ def schedule_charging_station( - asset: Asset, - market: Market, + sensor: Sensor, start: datetime, end: datetime, resolution: timedelta, @@ -32,7 +30,7 @@ def schedule_charging_station( # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( - market, (start, end), resolution, allow_trimmed_query_window=True + sensor, (start, end), resolution, allow_trimmed_query_window=True ) # soc targets are at the end of each time slot, while prices are indexed by the start of each time slot soc_targets = soc_targets.tz_convert("UTC") @@ -81,14 +79,16 @@ def schedule_charging_station( ) - soc_at_start * ( timedelta(hours=1) / resolution ) # Lacking information about the battery's nominal capacity, we use the highest target value as the maximum state of charge - if asset.is_pure_consumer: + if sensor.get_attribute("is_pure_consumer"): device_constraints[0]["derivative min"] = 0 else: - device_constraints[0]["derivative min"] = asset.capacity_in_mw * -1 - if asset.is_pure_producer: + device_constraints[0]["derivative min"] = ( + sensor.get_attribute("capacity_in_mw") * -1 + ) + if sensor.get_attribute("is_pure_producer"): device_constraints[0]["derivative max"] = 0 else: - device_constraints[0]["derivative max"] = asset.capacity_in_mw + device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] diff --git a/flexmeasures/data/models/planning/exceptions.py b/flexmeasures/data/models/planning/exceptions.py index da897abfc..3337120b1 100644 --- a/flexmeasures/data/models/planning/exceptions.py +++ b/flexmeasures/data/models/planning/exceptions.py @@ -1,2 +1,6 @@ +class UnknownMarketException(Exception): + pass + + class UnknownPricesException(Exception): pass diff --git 
a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index fd832b586..cdbc1191a 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -4,8 +4,8 @@ import numpy as np import pandas as pd -from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station from flexmeasures.utils.calculations import integrate_time_series @@ -14,54 +14,56 @@ def test_battery_solver_day_1(add_battery_assets): epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() - battery = Asset.query.filter(Asset.name == "Test battery").one_or_none() + battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + assert Market.query.get(battery.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 1)) end = as_server_time(datetime(2015, 1, 2)) resolution = timedelta(minutes=15) - soc_at_start = battery.soc_in_mwh - schedule = schedule_battery(battery, epex_da, start, end, resolution, soc_at_start) + soc_at_start = battery.get_attribute("soc_in_mwh") + schedule = schedule_battery(battery, start, end, resolution, soc_at_start) soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6) with pd.option_context("display.max_rows", None, "display.max_columns", 3): print(soc_schedule) # Check if constraints were met - assert min(schedule.values) >= battery.capacity_in_mw * -1 - assert max(schedule.values) <= battery.capacity_in_mw + assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 + assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") for soc in soc_schedule.values: - assert soc >= battery.min_soc_in_mwh - assert soc <= battery.max_soc_in_mwh + assert soc >= battery.get_attribute("min_soc_in_mwh") + assert soc <= battery.get_attribute("max_soc_in_mwh") def test_battery_solver_day_2(add_battery_assets): epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() - battery = Asset.query.filter(Asset.name == "Test battery").one_or_none() + battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + assert Market.query.get(battery.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) - soc_at_start = battery.soc_in_mwh - schedule = schedule_battery(battery, epex_da, start, end, resolution, soc_at_start) + soc_at_start = battery.get_attribute("soc_in_mwh") + schedule = schedule_battery(battery, start, end, resolution, soc_at_start) soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6) with pd.option_context("display.max_rows", None, "display.max_columns", 3): print(soc_schedule) # Check if constraints were met - assert min(schedule.values) >= battery.capacity_in_mw * -1 - assert max(schedule.values) <= battery.capacity_in_mw + assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 + assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") for soc in soc_schedule.values: - assert soc >= battery.min_soc_in_mwh - assert soc <= battery.max_soc_in_mwh + assert soc >= battery.get_attribute("min_soc_in_mwh") + assert soc <= 
battery.get_attribute("max_soc_in_mwh") # Check whether the resulting soc schedule follows our expectations for 8 expensive, 8 cheap and 8 expensive hours - assert ( - soc_schedule.iloc[-1] == battery.min_soc_in_mwh + assert soc_schedule.iloc[-1] == battery.get_attribute( + "min_soc_in_mwh" ) # Battery sold out at the end of its planning horizon - assert ( - soc_schedule.loc[start + timedelta(hours=8)] == battery.min_soc_in_mwh + assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( + "min_soc_in_mwh" ) # Sell what you begin with - assert ( - soc_schedule.loc[start + timedelta(hours=16)] == battery.max_soc_in_mwh + assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( + "max_soc_in_mwh" ) # Buy what you can to sell later @@ -82,9 +84,10 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): duration_until_target = timedelta(hours=2) epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() - charging_station = Asset.query.filter( - Asset.name == charging_station_name + charging_station = Sensor.query.filter( + Sensor.name == charging_station_name ).one_or_none() + assert Market.query.get(charging_station.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) @@ -94,15 +97,20 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): ) soc_targets.loc[target_soc_datetime] = target_soc consumption_schedule = schedule_charging_station( - charging_station, epex_da, start, end, resolution, soc_at_start, soc_targets + charging_station, start, end, resolution, soc_at_start, soc_targets ) soc_schedule = integrate_time_series( consumption_schedule, soc_at_start, decimal_precision=6 ) # Check if constraints were met - assert min(consumption_schedule.values) >= charging_station.capacity_in_mw * -1 - assert max(consumption_schedule.values) <= charging_station.capacity_in_mw + assert ( + min(consumption_schedule.values) + >= charging_station.get_attribute("capacity_in_mw") * -1 + ) + assert max(consumption_schedule.values) <= charging_station.get_attribute( + "capacity_in_mw" + ) print(consumption_schedule.head(12)) print(soc_schedule.head(12)) assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < 0.00001 diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index f6fc06cb6..97b2968ce 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -8,7 +8,11 @@ import timely_beliefs as tb from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.planning.exceptions import UnknownPricesException +from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.planning.exceptions import ( + UnknownMarketException, + UnknownPricesException, +) from flexmeasures.data.queries.utils import simplify_index @@ -56,8 +60,15 @@ def add_tiny_price_slope( return prices +def get_market(sensor: Sensor) -> Market: + market = Market.query.get(sensor.get_attribute("market_id")) + if market is None: + raise UnknownMarketException + return market + + def get_prices( - market: Market, + sensor: Sensor, query_window: Tuple[datetime, datetime], resolution: timedelta, allow_trimmed_query_window: bool = True, @@ -66,6 +77,10 @@ def get_prices( todo: set a horizon to avoid collecting prices that are not known at the time of constructing the schedule (this may require 
implementing a belief time for scheduling jobs). """ + + # Look for the applicable market + market = get_market(sensor) + price_bdf: tb.BeliefsDataFrame = Price.collect( market.name, query_window=query_window, diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index ce10d43b4..cce8cb86e 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -28,6 +28,8 @@ class Sensor(db.Model, tb.SensorDBMixin): """A sensor measures events. """ + attributes = db.Column(db.JSON, nullable=False, default="{}") + generic_asset_id = db.Column( db.Integer, db.ForeignKey("generic_asset.id", ondelete="CASCADE"), @@ -46,6 +48,7 @@ def __init__( name: str, generic_asset: Optional[GenericAsset] = None, generic_asset_id: Optional[int] = None, + attributes: Optional[dict] = None, **kwargs, ): assert (generic_asset is None) ^ ( @@ -57,12 +60,37 @@ def __init__( kwargs["generic_asset"] = generic_asset else: kwargs["generic_asset_id"] = generic_asset_id + if attributes is not None: + kwargs["attributes"] = attributes db.Model.__init__(self, **kwargs) @property def entity_address(self) -> str: return build_entity_address(dict(sensor_id=self.id), "sensor") + @property + def latitude(self) -> float: + return self.generic_asset.latitude + + @property + def longitude(self) -> float: + return self.generic_asset.longitude + + @property + def location(self) -> Optional[Tuple[float, float]]: + if None not in (self.latitude, self.longitude): + return self.latitude, self.longitude + return None + + def get_attribute(self, attribute: str): + """Looks for the attribute on the Sensor. + If not found, looks for the attribute on the Sensor's GenericAsset. + """ + if attribute in self.attributes: + return self.attributes[attribute] + elif attribute in self.generic_asset.attributes: + return self.generic_asset.attributes[attribute] + def latest_state( self, source: Optional[ @@ -383,8 +411,8 @@ def data_source_id(cls): # noqa: B902 @classmethod def make_query( cls, - asset_class: db.Model, - asset_names: Tuple[str], + old_sensor_class: db.Model, + old_sensor_names: Tuple[str], query_window: Tuple[Optional[datetime_type], Optional[datetime_type]], belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]] = ( None, @@ -421,9 +449,11 @@ def make_query( if session is None: session = db.session start, end = query_window - query = create_beliefs_query(cls, session, asset_class, asset_names, start, end) + query = create_beliefs_query( + cls, session, old_sensor_class, old_sensor_names, start, end + ) query = add_belief_timing_filter( - cls, query, asset_class, belief_horizon_window, belief_time_window + cls, query, old_sensor_class, belief_horizon_window, belief_time_window ) if user_source_ids: query = add_user_source_filter(cls, query, user_source_ids) @@ -440,7 +470,7 @@ def make_query( @classmethod def collect( cls, - generic_asset_names: Union[str, List[str]], + old_sensor_names: Union[str, List[str]], query_window: Tuple[Optional[datetime_type], Optional[datetime_type]] = ( None, None, @@ -465,7 +495,7 @@ def collect( where time series data collection is implemented. 
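# A minimal sketch of the attribute lookup chain introduced above (names are
# illustrative and assume the "Test battery" sensor from the test data):
battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none()
battery.get_attribute("min_soc_in_mwh")  # found in battery.attributes, if it was copied there
battery.get_attribute("market_id")       # otherwise looked up in battery.generic_asset.attributes
battery.get_attribute("no_such_key")     # found on neither, so the method implicitly returns None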
""" return collect_time_series_data( - generic_asset_names=generic_asset_names, + old_sensor_names=old_sensor_names, make_query=cls.make_query, query_window=query_window, belief_horizon_window=belief_horizon_window, diff --git a/flexmeasures/data/models/utils.py b/flexmeasures/data/models/utils.py index 34c0d18b1..f45040931 100644 --- a/flexmeasures/data/models/utils.py +++ b/flexmeasures/data/models/utils.py @@ -1,31 +1,18 @@ from typing import Union, Type -from flexmeasures.data.models.assets import AssetType, Asset, Power -from flexmeasures.data.models.markets import MarketType, Market, Price -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather +from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.weather import WeatherSensor, Weather -def determine_asset_type_by_asset( - generic_asset: Union[Asset, Market, WeatherSensor] -) -> Union[AssetType, MarketType, WeatherSensorType]: - if isinstance(generic_asset, Asset): - return generic_asset.asset_type - elif isinstance(generic_asset, Market): - return generic_asset.market_type - elif isinstance(generic_asset, WeatherSensor): - return generic_asset.sensor_type - else: - raise TypeError("Unknown generic asset type.") - - -def determine_asset_value_class_by_asset( - generic_asset: Union[Asset, Market, WeatherSensor] +def determine_old_time_series_class_by_old_sensor( + old_sensor: Union[Asset, Market, WeatherSensor] ) -> Type[Union[Power, Price, Weather]]: - if isinstance(generic_asset, Asset): + if isinstance(old_sensor, Asset): return Power - elif isinstance(generic_asset, Market): + elif isinstance(old_sensor, Market): return Price - elif isinstance(generic_asset, WeatherSensor): + elif isinstance(old_sensor, WeatherSensor): return Weather else: - raise TypeError("Unknown generic asset type.") + raise TypeError("Unknown old sensor type.") diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index 3d64a61cf..bad08fab1 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -11,6 +11,7 @@ from flexmeasures.data.models.time_series import Sensor, TimedValue from flexmeasures.data.models.generic_assets import ( create_generic_asset, + GenericAsset, GenericAssetType, ) from flexmeasures.utils.geo_utils import parse_lat_lng @@ -109,6 +110,23 @@ def entity_address(self) -> str: "sensor", ) + @property + def corresponding_sensor(self) -> Sensor: + return db.session.query(Sensor).get(self.id) + + @property + def generic_asset(self) -> GenericAsset: + return db.session.query(GenericAsset).get(self.corresponding_sensor.id) + + def get_attribute(self, attribute: str): + """Looks for the attribute on the corresponding Sensor. + + This should be used by all code to read these attributes, + over accessing them directly on this class, + as this table is in the process to be replaced by the Sensor table. 
+ """ + return self.corresponding_sensor.get_attribute(attribute) + @property def weather_unit(self) -> float: """Return the 'unit' property of the generic asset, just with a more insightful name.""" @@ -213,7 +231,7 @@ class Weather(TimedValue, db.Model): @classmethod def make_query(cls, **kwargs) -> Query: """Construct the database query.""" - return super().make_query(asset_class=WeatherSensor, **kwargs) + return super().make_query(old_sensor_class=WeatherSensor, **kwargs) def __init__(self, **kwargs): super(Weather, self).__init__(**kwargs) diff --git a/flexmeasures/data/queries/utils.py b/flexmeasures/data/queries/utils.py index e2c025cba..211752045 100644 --- a/flexmeasures/data/queries/utils.py +++ b/flexmeasures/data/queries/utils.py @@ -15,22 +15,22 @@ def create_beliefs_query( cls: "ts.TimedValue", session: Session, - asset_class: db.Model, - asset_names: Tuple[str], + old_sensor_class: db.Model, + old_sensor_names: Tuple[str], start: Optional[datetime], end: Optional[datetime], ) -> Query: query = ( session.query( - asset_class.name, cls.datetime, cls.value, cls.horizon, DataSource + old_sensor_class.name, cls.datetime, cls.value, cls.horizon, DataSource ) .join(DataSource) .filter(cls.data_source_id == DataSource.id) - .join(asset_class) - .filter(asset_class.name.in_(asset_names)) + .join(old_sensor_class) + .filter(old_sensor_class.name.in_(old_sensor_names)) ) if start is not None: - query = query.filter((cls.datetime > start - asset_class.event_resolution)) + query = query.filter((cls.datetime > start - old_sensor_class.event_resolution)) if end is not None: query = query.filter((cls.datetime < end)) return query diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index de1f94a85..bdabb0f08 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -27,6 +27,7 @@ from flexmeasures.data.models.user import User, Role, RolesUsers from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import NotEnoughDataException +from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor from flexmeasures.utils.time_utils import ensure_local_timezone from flexmeasures.data.transactional import as_transaction @@ -204,8 +205,8 @@ def populate_time_series_forecasts( # noqa: C901 forecast_start: datetime, forecast_end: datetime, event_resolution: Optional[timedelta] = None, - generic_asset_type: Optional[str] = None, - generic_asset_id: Optional[int] = None, + old_sensor_class_name: Optional[str] = None, + old_sensor_id: Optional[int] = None, ): training_and_testing_period = timedelta(days=30) @@ -219,51 +220,54 @@ def populate_time_series_forecasts( # noqa: C901 name="Seita", type="demo script" ).one_or_none() - # List all generic assets for which to forecast. - # Look into asset type if no asset name is given. If an asset name is given, - generic_assets = [] - if generic_asset_id is None: - if generic_asset_type is None or generic_asset_type == "WeatherSensor": + # List all old sensors for which to forecast. + # Look into their type if no name is given. 
If a name is given, + old_sensors = [] + if old_sensor_id is None: + if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": sensors = WeatherSensor.query.all() - generic_assets.extend(sensors) - if generic_asset_type is None or generic_asset_type == "Asset": + old_sensors.extend(sensors) + if old_sensor_class_name is None or old_sensor_class_name == "Asset": assets = Asset.query.all() - generic_assets.extend(assets) - if generic_asset_type is None or generic_asset_type == "Market": + old_sensors.extend(assets) + if old_sensor_class_name is None or old_sensor_class_name == "Market": markets = Market.query.all() - generic_assets.extend(markets) + old_sensors.extend(markets) else: - if generic_asset_type is None: + if old_sensor_class_name is None: click.echo( "If you specify --asset-name, please also specify --asset-type, so we can look it up." ) return - if generic_asset_type == "WeatherSensor": + if old_sensor_class_name == "WeatherSensor": sensors = WeatherSensor.query.filter( - WeatherSensor.id == generic_asset_id + WeatherSensor.id == old_sensor_id ).one_or_none() if sensors is not None: - generic_assets.append(sensors) - if generic_asset_type == "Asset": - assets = Asset.query.filter(Asset.id == generic_asset_id).one_or_none() + old_sensors.append(sensors) + if old_sensor_class_name == "Asset": + assets = Asset.query.filter(Asset.id == old_sensor_id).one_or_none() if assets is not None: - generic_assets.append(assets) - if generic_asset_type == "Market": - markets = Market.query.filter(Market.id == generic_asset_id).one_or_none() + old_sensors.append(assets) + if old_sensor_class_name == "Market": + markets = Market.query.filter(Market.id == old_sensor_id).one_or_none() if markets is not None: - generic_assets.append(markets) - if not generic_assets: + old_sensors.append(markets) + if not old_sensors: click.echo("No such assets in db, so I will not add any forecasts.") return - # Make a model for each asset and horizon, make rolling forecasts and save to database. + # Make a model for each old sensor and horizon, make rolling forecasts and save to database. # We cannot use (faster) bulk save, as forecasts might become regressors in other forecasts. - for generic_asset in generic_assets: + for old_sensor in old_sensors: for horizon in horizons: try: default_model = lookup_model_specs_configurator() model_specs, model_identifier, model_fallback = default_model( - generic_asset=generic_asset, + sensor=old_sensor.corresponding_sensor, + time_series_class=determine_old_time_series_class_by_old_sensor( + old_sensor + ), forecast_start=forecast_start, forecast_end=forecast_end, forecast_horizon=horizon, @@ -277,7 +281,7 @@ def populate_time_series_forecasts( # noqa: C901 "from %s to %s with a training and testing period of %s, using %s ..." 
% ( naturaldelta(horizon), - generic_asset.name, + old_sensor.name, forecast_start, forecast_end, naturaldelta(training_and_testing_period), @@ -289,15 +293,15 @@ def populate_time_series_forecasts( # noqa: C901 start=forecast_start, end=forecast_end, model_specs=model_specs ) # Upsample to sensor resolution if needed - if forecasts.index.freq > pd.Timedelta(generic_asset.event_resolution): + if forecasts.index.freq > pd.Timedelta(old_sensor.event_resolution): forecasts = model_specs.outcome_var.resample_data( forecasts, time_window=(forecasts.index.min(), forecasts.index.max()), - expected_frequency=generic_asset.event_resolution, + expected_frequency=old_sensor.event_resolution, ) except (NotEnoughDataException, MissingData, NaNData) as e: click.echo( - "Skipping forecasts for asset %s: %s" % (generic_asset, str(e)) + "Skipping forecasts for old sensor %s: %s" % (old_sensor, str(e)) ) continue """ @@ -314,35 +318,35 @@ def populate_time_series_forecasts( # noqa: C901 """ beliefs = [] - if isinstance(generic_asset, Asset): + if isinstance(old_sensor, Asset): beliefs = [ Power( datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), horizon=horizon, value=value, - asset_id=generic_asset.id, + asset_id=old_sensor.id, data_source_id=data_source.id, ) for dt, value in forecasts.items() ] - elif isinstance(generic_asset, Market): + elif isinstance(old_sensor, Market): beliefs = [ Price( datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), horizon=horizon, value=value, - market_id=generic_asset.id, + market_id=old_sensor.id, data_source_id=data_source.id, ) for dt, value in forecasts.items() ] - elif isinstance(generic_asset, WeatherSensor): + elif isinstance(old_sensor, WeatherSensor): beliefs = [ Weather( datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), horizon=horizon, value=value, - sensor_id=generic_asset.id, + sensor_id=old_sensor.id, data_source_id=data_source.id, ) for dt, value in forecasts.items() @@ -350,7 +354,7 @@ def populate_time_series_forecasts( # noqa: C901 print( "Saving %s %s-forecasts for %s..." - % (len(beliefs), naturaldelta(horizon), generic_asset.name) + % (len(beliefs), naturaldelta(horizon), old_sensor.name) ) for belief in beliefs: db.session.add(belief) @@ -403,8 +407,8 @@ def depopulate_structure(db: SQLAlchemy): @as_transaction def depopulate_measurements( db: SQLAlchemy, - generic_asset_type: Optional[str] = None, - generic_asset_id: Optional[id] = None, + old_sensor_class_name: Optional[str] = None, + old_sensor_id: Optional[id] = None, ): click.echo("Depopulating (time series) data from the database %s ..." 
% db.engine) num_prices_deleted = 0 @@ -413,35 +417,35 @@ def depopulate_measurements( # TODO: simplify this when sensors moved to one unified table - if generic_asset_id is None: - if generic_asset_type is None or generic_asset_type == "Market": + if old_sensor_id is None: + if old_sensor_class_name is None or old_sensor_class_name == "Market": num_prices_deleted = ( db.session.query(Price) .filter(Price.horizon <= timedelta(hours=0)) .delete() ) - if generic_asset_type is None or generic_asset_type == "Asset": + if old_sensor_class_name is None or old_sensor_class_name == "Asset": num_power_measurements_deleted = ( db.session.query(Power) .filter(Power.horizon <= timedelta(hours=0)) .delete() ) - if generic_asset_type is None or generic_asset_type == "WeatherSensor": + if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": num_weather_measurements_deleted = ( db.session.query(Weather) .filter(Weather.horizon <= timedelta(hours=0)) .delete() ) else: - if generic_asset_type is None: + if old_sensor_class_name is None: click.echo( "If you specify --asset-name, please also specify --asset-type, so we can look it up." ) return - if generic_asset_type == "Market": + if old_sensor_class_name == "Market": market = ( db.session.query(Market) - .filter(Market.id == generic_asset_id) + .filter(Market.id == old_sensor_id) .one_or_none() ) if market is not None: @@ -454,11 +458,9 @@ def depopulate_measurements( else: num_prices_deleted = 0 - elif generic_asset_type == "Asset": + elif old_sensor_class_name == "Asset": asset = ( - db.session.query(Asset) - .filter(Asset.id == generic_asset_id) - .one_or_none() + db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() ) if asset is not None: num_power_measurements_deleted = ( @@ -470,10 +472,10 @@ def depopulate_measurements( else: num_power_measurements_deleted = 0 - elif generic_asset_type == "WeatherSensor": + elif old_sensor_class_name == "WeatherSensor": sensor = ( db.session.query(WeatherSensor) - .filter(WeatherSensor.id == generic_asset_id) + .filter(WeatherSensor.id == old_sensor_id) .one_or_none() ) if sensor is not None: @@ -494,8 +496,8 @@ def depopulate_measurements( @as_transaction def depopulate_prognoses( db: SQLAlchemy, - generic_asset_type: Optional[str] = None, - generic_asset_id: Optional[id] = None, + old_sensor_class_name: Optional[str] = None, + old_sensor_id: Optional[id] = None, ): click.echo( "Depopulating (time series) forecasts and schedules data from the database %s ..." 
@@ -510,20 +512,20 @@ def depopulate_prognoses( num_scheduling_jobs_deleted = app.queues["scheduling"].empty() # Clear all forecasts (data with positive horizon) - if generic_asset_id is None: - if generic_asset_type is None or generic_asset_type == "Market": + if old_sensor_id is None: + if old_sensor_class_name is None or old_sensor_class_name == "Market": num_prices_deleted = ( db.session.query(Price) .filter(Price.horizon > timedelta(hours=0)) .delete() ) - if generic_asset_type is None or generic_asset_type == "Asset": + if old_sensor_class_name is None or old_sensor_class_name == "Asset": num_power_measurements_deleted = ( db.session.query(Power) .filter(Power.horizon > timedelta(hours=0)) .delete() ) - if generic_asset_type is None or generic_asset_type == "WeatherSensor": + if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": num_weather_measurements_deleted = ( db.session.query(Weather) .filter(Weather.horizon > timedelta(hours=0)) @@ -532,13 +534,13 @@ def depopulate_prognoses( else: click.echo( "Depopulating (time series) forecasts and schedules for %s from the database %s ..." - % (generic_asset_id, db.engine) + % (old_sensor_id, db.engine) ) - if generic_asset_type == "Market": + if old_sensor_class_name == "Market": market = ( db.session.query(Market) - .filter(Market.id == generic_asset_id) + .filter(Market.id == old_sensor_id) .one_or_none() ) if market is not None: @@ -551,11 +553,9 @@ def depopulate_prognoses( else: num_prices_deleted = 0 - if generic_asset_type == "Asset": + if old_sensor_class_name == "Asset": asset = ( - db.session.query(Asset) - .filter(Asset.id == generic_asset_id) - .one_or_none() + db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() ) if asset is not None: num_power_measurements_deleted = ( @@ -567,10 +567,10 @@ def depopulate_prognoses( else: num_power_measurements_deleted = 0 - if generic_asset_type == "WeatherSensor": + if old_sensor_class_name == "WeatherSensor": sensor = ( db.session.query(WeatherSensor) - .filter(WeatherSensor.id == generic_asset_id) + .filter(WeatherSensor.id == old_sensor_id) .one_or_none() ) if sensor is not None: diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 230d914cd..a0d98c303 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -13,7 +13,7 @@ from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.utils import determine_asset_value_class_by_asset +from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor from flexmeasures.data.models.forecasting.utils import ( get_query_window, check_data_availability, @@ -47,7 +47,7 @@ class MisconfiguredForecastingJobException(Exception): def create_forecasting_jobs( timed_value_type: str, - asset_id: int, + old_sensor_id: int, start_of_roll: datetime, end_of_roll: datetime, resolution: timedelta = None, @@ -100,7 +100,7 @@ def create_forecasting_jobs( job = Job.create( make_rolling_viewpoint_forecasts, kwargs=dict( - asset_id=asset_id, + old_sensor_id=old_sensor_id, timed_value_type=timed_value_type, horizon=horizon, start=start_of_roll + horizon, @@ -123,7 +123,7 @@ def create_forecasting_jobs( def make_fixed_viewpoint_forecasts( - asset_id: int, + old_sensor_id: int, timed_value_type: str, horizon: 
timedelta, start: datetime, @@ -141,7 +141,7 @@ def make_fixed_viewpoint_forecasts( def make_rolling_viewpoint_forecasts( - asset_id: int, + old_sensor_id: int, timed_value_type: str, horizon: timedelta, start: datetime, @@ -157,8 +157,8 @@ def make_rolling_viewpoint_forecasts( Parameters ---------- - :param asset_id: int - To identify which asset to forecast + :param old_sensor_id: int + To identify which old sensor to forecast (note: old_sensor_id == sensor_id) :param timed_value_type: str This should go away after a refactoring - we now use it to create the DB entry for the forecasts :param horizon: timedelta @@ -181,15 +181,15 @@ def make_rolling_viewpoint_forecasts( # find out which model to run, fall back to latest recommended model_search_term = rq_job.meta.get("model_search_term", "linear-OLS") - # find asset - asset = get_asset(asset_id, timed_value_type) + # find old sensor + old_sensor = get_old_sensor(old_sensor_id, timed_value_type) click.echo( "Running Forecasting Job %s: %s for %s on model '%s', from %s to %s" - % (rq_job.id, asset, horizon, model_search_term, start, end) + % (rq_job.id, old_sensor, horizon, model_search_term, start, end) ) - if hasattr(asset, "market_type"): + if hasattr(old_sensor, "market_type"): ex_post_horizon = None # Todo: until we sorted out the ex_post_horizon, use all available price data else: ex_post_horizon = timedelta(hours=0) @@ -197,7 +197,8 @@ def make_rolling_viewpoint_forecasts( # Make model specs model_configurator = lookup_model_specs_configurator(model_search_term) model_specs, model_identifier, fallback_model_search_term = model_configurator( - generic_asset=asset, + sensor=old_sensor.corresponding_sensor, + time_series_class=determine_old_time_series_class_by_old_sensor(old_sensor), forecast_start=as_server_time(start), forecast_end=as_server_time(end), forecast_horizon=horizon, @@ -222,8 +223,8 @@ def make_rolling_viewpoint_forecasts( [lag * model_specs.frequency for lag in model_specs.lags], ) check_data_availability( - asset, - determine_asset_value_class_by_asset(asset), + old_sensor, + determine_old_time_series_class_by_old_sensor(old_sensor), start, end, query_window, @@ -244,7 +245,9 @@ def make_rolling_viewpoint_forecasts( click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts))) ts_value_forecasts = [ - make_timed_value(timed_value_type, asset_id, dt, value, horizon, data_source.id) + make_timed_value( + timed_value_type, old_sensor_id, dt, value, horizon, data_source.id + ) for dt, value in forecasts.items() ] @@ -308,44 +311,48 @@ def num_forecasts(start: datetime, end: datetime, resolution: timedelta) -> int: # and store everything in one time series database. -def get_asset( - asset_id: int, timed_value_type: str +def get_old_sensor( + old_sensor_id: int, timed_value_type: str ) -> Union[Asset, Market, WeatherSensor]: - """Get asset for this job. Maybe simpler once we redesign timed value classes (make a generic one)""" + """Get old sensor for this job. 
Maybe simpler once we redesign timed value classes (make a generic one)""" if timed_value_type not in ("Power", "Price", "Weather"): - raise Exception("Cannot get asset for asset_type '%s'" % timed_value_type) - asset = None + raise Exception( + "Cannot get old sensor for timed_value_type '%s'" % timed_value_type + ) + old_sensor = None if timed_value_type == "Power": - asset = Asset.query.filter_by(id=asset_id).one_or_none() + old_sensor = Asset.query.filter_by(id=old_sensor_id).one_or_none() elif timed_value_type == "Price": - asset = Market.query.filter_by(id=asset_id).one_or_none() + old_sensor = Market.query.filter_by(id=old_sensor_id).one_or_none() elif timed_value_type == "Weather": - asset = WeatherSensor.query.filter_by(id=asset_id).one_or_none() - if asset is None: + old_sensor = WeatherSensor.query.filter_by(id=old_sensor_id).one_or_none() + if old_sensor is None: raise Exception( - "Cannot find asset for value type %s with id %d" - % (timed_value_type, asset_id) + "Cannot find old sensor for value type %s with id %d" + % (timed_value_type, old_sensor_id) ) - return asset + return old_sensor def make_timed_value( timed_value_type: str, - asset_id: int, + old_sensor_id: int, dt: datetime, value: float, horizon: timedelta, data_source_id: int, ) -> Union[Power, Price, Weather]: if timed_value_type not in ("Power", "Price", "Weather"): - raise Exception("Cannot get asset for asset_type '%s'" % timed_value_type) + raise Exception( + "Cannot get old sensor for timed_value_type '%s'" % timed_value_type + ) ts_value = None if timed_value_type == "Power": ts_value = Power( datetime=dt, horizon=horizon, value=value, - asset_id=asset_id, + asset_id=old_sensor_id, data_source_id=data_source_id, ) elif timed_value_type == "Price": @@ -353,7 +360,7 @@ def make_timed_value( datetime=dt, horizon=horizon, value=value, - market_id=asset_id, + market_id=old_sensor_id, data_source_id=data_source_id, ) elif timed_value_type == "Weather": @@ -361,11 +368,12 @@ def make_timed_value( datetime=dt, horizon=horizon, value=value, - sensor_id=asset_id, + sensor_id=old_sensor_id, data_source_id=data_source_id, ) if ts_value is None: raise Exception( - "Cannot create asset of type %s with id %d" % (timed_value_type, asset_id) + "Cannot create timed value of type %s with id %d" + % (timed_value_type, old_sensor_id) ) return ts_value diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index 13d68af77..c73490a37 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -450,7 +450,7 @@ def load_sensor_data( # Query the sensors resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.collect( - generic_asset_names=list(names_of_resource_sensors), + old_sensor_names=list(names_of_resource_sensors), query_window=(start, end), belief_horizon_window=belief_horizon_window, belief_time_window=belief_time_window, @@ -630,7 +630,7 @@ def find_closest_weather_sensor( Can be called with an object that has latitude and longitude properties, for example: - sensor = find_closest_weather_sensor("wind_speed", object=asset) + sensor = find_closest_weather_sensor("wind_speed", object=generic_asset) Can also be called with latitude and longitude parameters, for example: diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 69a7e13ed..d94f88baa 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -11,9 +11,10 @@ from sqlalchemy.exc import 
IntegrityError from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.utils import save_to_session, get_data_source """ @@ -99,16 +100,19 @@ def make_schedule( rq_job = get_current_job() - # find asset - asset = Asset.query.filter_by(id=asset_id).one_or_none() + # find sensor + sensor = Sensor.query.filter_by(id=asset_id).one_or_none() click.echo( - "Running Scheduling Job %s: %s, from %s to %s" % (rq_job.id, asset, start, end) + "Running Scheduling Job %s: %s, from %s to %s" % (rq_job.id, sensor, start, end) ) if soc_at_start is None: - if start == asset.soc_datetime and asset.soc_in_mwh is not None: - soc_at_start = asset.soc_in_mwh + if ( + start == sensor.get_attribute("soc_datetime") + and sensor.get_attribute("soc_in_mwh") is not None + ): + soc_at_start = sensor.get_attribute("soc_in_mwh") else: soc_at_start = 0 @@ -117,20 +121,21 @@ def make_schedule( np.nan, index=pd.date_range(start, end, freq=resolution, closed="right") ) - if asset.asset_type_name == "battery": + if sensor.generic_asset.generic_asset_type.name == "battery": consumption_schedule = schedule_battery( - asset, asset.market, start, end, resolution, soc_at_start, soc_targets + sensor, start, end, resolution, soc_at_start, soc_targets ) - elif asset.asset_type_name in ( + elif sensor.generic_asset.generic_asset_type.name in ( "one-way_evse", "two-way_evse", ): consumption_schedule = schedule_charging_station( - asset, asset.market, start, end, resolution, soc_at_start, soc_targets + sensor, start, end, resolution, soc_at_start, soc_targets ) else: raise ValueError( - "Scheduling is not (yet) supported for asset type %s." % asset.asset_type + "Scheduling is not (yet) supported for asset type %s." + % sensor.generic_asset.generic_asset_type ) data_source = get_data_source( diff --git a/flexmeasures/data/services/time_series.py b/flexmeasures/data/services/time_series.py index aa8e4408c..e889f61c7 100644 --- a/flexmeasures/data/services/time_series.py +++ b/flexmeasures/data/services/time_series.py @@ -33,7 +33,7 @@ def collect_time_series_data( - generic_asset_names: Union[str, List[str]], + old_sensor_names: Union[str, List[str]], make_query: QueryCallType, query_window: Tuple[Optional[datetime], Optional[datetime]] = (None, None), belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]] = ( None, @@ -48,12 +48,12 @@ def collect_time_series_data( resolution: Union[str, timedelta] = None, sum_multiple: bool = True, ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: - """Get time series data from one or more generic assets and rescale and re-package it to order. + """Get time series data from one or more old sensor models and rescale and re-package it to order. We can (lazily) look up by pickle, or load from the database. In the latter case, we are relying on time series data (power measurements and prices at this point) to have the same relevant column names (datetime, value). - We require a list of assets or market names to find the generic asset. + We require an old sensor model name or a list thereof. If the time range parameters are None, they will be gotten from the session. Response is a 2D BeliefsDataFrame with the column event_value.
If data from multiple assets is retrieved, the results are being summed. @@ -64,13 +64,13 @@ def collect_time_series_data( """ # convert to tuple to support caching the query - if isinstance(generic_asset_names, str): - generic_asset_names = (generic_asset_names,) - elif isinstance(generic_asset_names, list): - generic_asset_names = tuple(generic_asset_names) + if isinstance(old_sensor_names, str): + old_sensor_names = (old_sensor_names,) + elif isinstance(old_sensor_names, list): + old_sensor_names = tuple(old_sensor_names) bdf_dict = query_time_series_data( - generic_asset_names, + old_sensor_names, make_query, query_window, belief_horizon_window, @@ -89,7 +89,7 @@ def collect_time_series_data( def query_time_series_data( - generic_asset_names: Tuple[str], + old_sensor_names: Tuple[str], make_query: QueryCallType, query_window: Tuple[Optional[datetime], Optional[datetime]] = (None, None), belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]] = ( @@ -121,7 +121,7 @@ def query_time_series_data( query_window = convert_query_window_for_demo(query_window) query = make_query( - asset_names=generic_asset_names, + old_sensor_names=old_sensor_names, query_window=query_window, belief_horizon_window=belief_horizon_window, belief_time_window=belief_time_window, @@ -135,10 +135,10 @@ def query_time_series_data( query.all(), columns=[col["name"] for col in query.column_descriptions] ) bdf_dict: Dict[str, tb.BeliefsDataFrame] = {} - for generic_asset_name in generic_asset_names: + for old_sensor_model_name in old_sensor_names: # Select data for the given asset - df = df_all_assets[df_all_assets["name"] == generic_asset_name].loc[ + df = df_all_assets[df_all_assets["name"] == old_sensor_model_name].loc[ :, df_all_assets.columns != "name" ] @@ -182,7 +182,7 @@ def query_time_series_data( if current_app.config.get("FLEXMEASURES_MODE", "") == "demo": df.index = df.index.map(lambda t: t.replace(year=datetime.now().year)) - sensor = find_sensor_by_name(name=generic_asset_name) + sensor = find_sensor_by_name(name=old_sensor_model_name) bdf = tb.BeliefsDataFrame(df.reset_index(), sensor=sensor) # re-sample data to the resolution we need to serve @@ -205,7 +205,7 @@ def query_time_series_data( if query_window[1] is not None: bdf = bdf[bdf.index.get_level_values("event_start") < query_window[1]] - bdf_dict[generic_asset_name] = bdf + bdf_dict[old_sensor_model_name] = bdf return bdf_dict diff --git a/flexmeasures/data/tests/test_forecasting_jobs.py b/flexmeasures/data/tests/test_forecasting_jobs.py index 9c6f9c6a4..f9be87b72 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs.py +++ b/flexmeasures/data/tests/test_forecasting_jobs.py @@ -62,7 +62,7 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): start_of_roll=as_server_time(datetime(2015, 1, 1, 6)), end_of_roll=as_server_time(datetime(2015, 1, 1, 7)), horizons=[horizon], - asset_id=wind_device_1.id, + old_sensor_id=wind_device_1.id, custom_model_params=custom_model_params(), ) @@ -111,7 +111,7 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ horizons=[ timedelta(hours=6) ], # so we want forecasts for 11.15pm (Jan 1st) to 0.15am (Jan 2nd) - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, custom_model_params=custom_model_params(), ) print("Job: %s" % job[0].id) @@ -179,7 +179,7 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) start_of_roll=as_server_time(datetime(2016, 1, 1, 20)), end_of_roll=as_server_time(datetime(2016, 1, 1, 22)), 
horizons=[timedelta(hours=1)], - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, custom_model_params=custom_model_params(), ) work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) @@ -194,7 +194,7 @@ def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): start_of_roll=as_server_time(datetime(2015, 1, 1, 21)), end_of_roll=as_server_time(datetime(2015, 1, 1, 23)), horizons=[timedelta(hours=18)], - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, custom_model_params=custom_model_params(), ) work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) @@ -214,7 +214,7 @@ def test_failed_unknown_model(app, clean_redis, setup_test_data): start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, model_search_term="no-one-knows-this", custom_model_params=cmp, ) diff --git a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py index 6ba275264..efdab97fa 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py @@ -28,7 +28,7 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis start_of_roll=as_server_time(datetime(2015, 1, 1, 10)), end_of_roll=as_server_time(datetime(2015, 1, 1, 13)), horizons=[horizon], - asset_id=wind_device2.id, + old_sensor_id=wind_device2.id, custom_model_params=custom_model_params(), ) print("Job: %s" % job[0].id) @@ -61,7 +61,7 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, custom_model_params=custom_model_params(), ) print("Job: %s" % job[0].id) @@ -109,7 +109,7 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)), end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)), horizons=[horizon], - asset_id=solar_device1.id, + old_sensor_id=solar_device1.id, model_search_term=model_to_start_with, custom_model_params=cmp, ) diff --git a/flexmeasures/data/tests/test_scheduling_jobs.py b/flexmeasures/data/tests/test_scheduling_jobs.py index ec7f0a4cf..42a317bdb 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs.py +++ b/flexmeasures/data/tests/test_scheduling_jobs.py @@ -2,7 +2,8 @@ from datetime import datetime, timedelta from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.utils.time_utils import as_server_time @@ -14,7 +15,7 @@ def test_scheduling_a_battery(db, app, add_battery_assets, setup_test_data): - schedule has been made """ - battery = Asset.query.filter(Asset.name == "Test battery").one_or_none() + battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) diff --git 
a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py index 722b69adf..18fce924b 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py @@ -3,8 +3,9 @@ import numpy as np import pandas as pd -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.utils.time_utils import as_server_time @@ -23,8 +24,8 @@ def test_scheduling_a_charging_station( target_soc = 5 duration_until_target = timedelta(hours=2) - charging_station = Asset.query.filter( - Asset.name == "Test charging station" + charging_station = Sensor.query.filter( + Sensor.name == "Test charging station" ).one_or_none() start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) From 079296228ad413255db3ecc0321ce312451c8898 Mon Sep 17 00:00:00 2001 From: "create-issue-branch[bot]" <53036503+create-issue-branch[bot]@users.noreply.github.com> Date: Fri, 3 Dec 2021 12:52:42 +0100 Subject: [PATCH 02/46] API package gets and sets metadata on GenericAssets and Sensors (#243) Make JSON attributes mutable and have the API package get and set metadata on GenericAssets and Sensors. Applies to all versions of the API, but excludes CRUD functionality on assets in API v2.0. * Create draft PR for #239 * Db migration that copies over attributes from old data models * - In Asset.__init__, copy over attributes to GenericAsset. - Start having our model_spec_factory get attributes it needs from GenericAsset. - Rename variables, specifically, variables that were annotated as a union of our old sensor models were named generic_asset, which was too easily confused with instances of our GenericAsset class. * model_spec_factory now gets its attributes from GenericAsset instead of old sensor model types * More renaming to avoid confusion * Have db migration copy over sensor attributes: unit, event_resolution and knowledge horizons * In Asset.__init__, copy over sensor attributes: unit, event_resolution and knowledge horizons * model_spec_factory now gets event_resolution and name from Sensor * Fix tests * Factor out use of corresponding_generic_asset attribute * Factor out use of corresponding_generic_asset attribute * More renaming * Pass time series class to model configurator explicitly * Finally, model_spec_factory doesn't need the old sensor model anymore * Allow setting the collect function name for TBSeriesSpecs to something custom * In Asset.__init__, copy over additional asset attributes to GenericAsset * Planning subpackage uses sensors instead of assets * Move some simple attributes in the UI package * Refactor to stop explicitly passing the market to the scheduler, and instead have the scheduler check for an applicable market * Revert "Move some simple attributes in the UI package", because this needs to be done jointly with moving over asset crud (which we test for) This reverts commit 56ff279cc19ce58a2ab3c56224bae5226c9fbd9c. * Create draft PR for #242 * Allow config setting specs as module variables, too.
Support reading config setting specs from module (#237) * Support reading config setting specs from module * Add additional documentation (review suggestion) * Amend changelog entry * Create draft PR for #242 * Make JSON attributes mutable * Set Asset owner at initialization, so it is copied to GenericAsset * API v1_2 gets and sets asset attributes in the new data model * Deprecate use of Asset class in api v1_2 * Deprecate use of Asset class in api v1_3 * Revert "Allow config setting specs as module variables, too." This reverts commit 327b8b6f6bf617bff5cc54ca7f68e52cf7418e76. * Work around black issue by updating pre-commit-config * Deprecate use of Asset class in api v1_0 * Deprecate use of Asset class in part of api v2_0 * Fix docstring * Revert upgrade of black in pre-commit-config * Still battling black * Correct docstring * Deprecate use of Asset class in SensorField deserialization * Set Asset owner at initialization, so it is copied to GenericAsset * Simplify conftest * Add notes about how each attribute is to be copied from an old class to a new class * Rename variables * Refactor attribute copying to util function * In Market.__init__, copy over attributes from old models to new models * In Weather.__init__, copy over attributes from old models to new models * Deprecate use of Market class in SensorField deserialization * Deprecate use of WeatherSensor class in SensorField deserialization for fm1 * Simplify fm1 entity type schema for deserialization * Deprecate use of WeatherSensor class in SensorField deserialization for fm0 * Refactor query * Rename variable * Refactor query to arrive at a single combined query * Update todos * Intend to copy display_name to both GenericAsset and Sensor * Introduce Sensor attributes and copy most old model attributes there instead of to GenericAsset attributes * Adjust attribute copying in Asset.__init__ * Implement Sensor method to get an attribute * Give old sensor classes a generic_asset property * Give old sensor classes a get_attribute property * Derive Sensor class lat/lng location from GenericAsset * Get attributes from Sensor rather than from GenericAsset * Resolve merge conflict * Refactor after resolving merge conflict * Adjust attribute copying in Market.__init__ * Adjust attribute copying in WeatherSensor.__init__ * Post-merge cherry-pick: Set default attributes on generic assets, too * Post-merge cherry-pick: Add clarity to method docstring * Introduce has_attribute and set_attribute on the Sensor class, too * Remove redundant import * Get attributes from Sensor * Simplify (as requested in PR review) * Add docstring to migration util functions, explaining their parameters * Add module docstring * Add todos * Make Sensor attributes mutable, too * Avoid assumptions on db type (specifically, postgres) * Update upgrade migration docstring * Deprecate use of Market class in api v1_1 * Separate setup of market types and markets for tests, otherwise we run into flush issues * Remove redundant copy, now that we initialize super() first * Fix bugs: work on kwargs before copying from it and move up initialization of super() * Increase the chance of identifying a unique sensor by just its name, if you also know the name of its generic asset type * Simplify API tests by removing the owner id from event-type entity addresses, as the server ignores this optional field anyways * Simplify API tests by removing the owner id from event-type entity addresses, as the server ignores this optional field anyways * Deprecate use of Asset class in v1_3 
tests * Deprecate use of Asset class in v1_2 tests * Deprecate use of Asset class in v1 tests * Deprecate use of Market class in v1_1 tests * Fix merge errors * Update string status code * Rename legacy migration module Co-authored-by: Flix6x Co-authored-by: F.N. Claessen Co-authored-by: Felix Claessen <30658763+Flix6x@users.noreply.github.com> --- flexmeasures/api/common/responses.py | 5 + flexmeasures/api/common/schemas/sensors.py | 83 ++++-------- .../api/common/schemas/tests/test_sensors.py | 3 + flexmeasures/api/common/utils/api_utils.py | 37 +++--- .../api/common/utils/migration_utils.py | 42 ++++++ flexmeasures/api/common/utils/validators.py | 44 +++---- flexmeasures/api/v1/implementations.py | 61 ++++----- flexmeasures/api/v1/tests/conftest.py | 18 +-- flexmeasures/api/v1/tests/test_api_v1.py | 4 +- .../api/v1/tests/test_api_v1_fresh_db.py | 6 +- flexmeasures/api/v1/tests/utils.py | 14 +- flexmeasures/api/v1_1/implementations.py | 49 +++---- flexmeasures/api/v1_1/tests/conftest.py | 2 +- flexmeasures/api/v1_1/tests/test_api_v1_1.py | 6 +- flexmeasures/api/v1_1/tests/utils.py | 12 +- flexmeasures/api/v1_2/implementations.py | 87 +++++++------ flexmeasures/api/v1_2/tests/test_api_v1_2.py | 46 +++---- flexmeasures/api/v1_2/tests/utils.py | 6 +- flexmeasures/api/v1_3/implementations.py | 96 ++++++++------ flexmeasures/api/v1_3/tests/test_api_v1_3.py | 46 ++++--- .../api/v1_3/tests/test_api_v1_3_fresh_db.py | 12 +- flexmeasures/api/v1_3/tests/utils.py | 6 +- .../api/v2_0/implementations/assets.py | 2 +- .../api/v2_0/implementations/sensors.py | 47 ++++--- flexmeasures/conftest.py | 12 +- ...es_from_old_data_models_to_GenericAsset.py | 22 ++-- flexmeasures/data/models/assets.py | 120 +++++++----------- flexmeasures/data/models/generic_assets.py | 11 +- .../data/models/legacy_migration_utils.py | 62 +++++++++ flexmeasures/data/models/markets.py | 67 ++++++++-- flexmeasures/data/models/time_series.py | 10 +- flexmeasures/data/models/weather.py | 58 ++++++++- flexmeasures/data/services/resources.py | 66 +++++++--- flexmeasures/data/tests/test_queries.py | 8 +- .../data/tests/test_time_series_services.py | 2 +- flexmeasures/utils/entity_address_utils.py | 2 +- 36 files changed, 708 insertions(+), 466 deletions(-) create mode 100644 flexmeasures/api/common/utils/migration_utils.py create mode 100644 flexmeasures/data/models/legacy_migration_utils.py diff --git a/flexmeasures/api/common/responses.py b/flexmeasures/api/common/responses.py index 46bd71409..46eab7caa 100644 --- a/flexmeasures/api/common/responses.py +++ b/flexmeasures/api/common/responses.py @@ -30,6 +30,11 @@ def my_logic(*args, **kwargs): return my_logic +@BaseMessage("The requested API version is deprecated for this feature.") +def deprecated_api_version(message: str) -> ResponseTuple: + return dict(result="Rejected", status="INVALID_API_VERSION", message=message), 400 + + @BaseMessage("Some of the data has already been received and successfully processed.") def already_received_and_successfully_processed(message: str) -> ResponseTuple: return ( diff --git a/flexmeasures/api/common/schemas/sensors.py b/flexmeasures/api/common/schemas/sensors.py index 4989b5fb4..27b16669b 100644 --- a/flexmeasures/api/common/schemas/sensors.py +++ b/flexmeasures/api/common/schemas/sensors.py @@ -3,7 +3,9 @@ from marshmallow import fields from flexmeasures.api import FMValidationError -from flexmeasures.api.common.utils.api_utils import get_weather_sensor_by +from flexmeasures.api.common.utils.api_utils import ( + 
get_sensor_by_generic_asset_type_and_location, +) from flexmeasures.utils.entity_address_utils import ( parse_entity_address, EntityAddressException, @@ -19,8 +21,8 @@ class EntityAddressValidationError(FMValidationError): class SensorField(fields.Str): - """Field that de-serializes to a Sensor, Asset, Market or WeatherSensor - and serializes back to an entity address (string).""" + """Field that de-serializes to a Sensor, + and serializes a Sensor, Asset, Market or WeatherSensor into an entity address (string).""" # todo: when Actuators also get an entity address, refactor this class to EntityField, # where an Entity represents anything with an entity address: we currently foresee Sensors and Actuators @@ -42,85 +44,50 @@ def __init__( def _deserialize( # noqa: C901 todo: the noqa can probably be removed after refactoring Asset/Market/WeatherSensor to Sensor self, value, attr, obj, **kwargs - ) -> Union[Sensor, Asset, Market, WeatherSensor]: - """De-serialize to a Sensor, Asset, Market or WeatherSensor.""" + ) -> Sensor: + """De-serialize to a Sensor.""" # TODO: After refactoring, unify 3 generic_asset cases -> 1 sensor case try: ea = parse_entity_address(value, self.entity_type, self.fm_scheme) if self.fm_scheme == "fm0": if self.entity_type == "connection": - asset = Asset.query.filter(Asset.id == ea["asset_id"]).one_or_none() - if asset is not None: - return asset - else: - raise EntityAddressValidationError( - f"Asset with entity address {value} doesn't exist." - ) - elif self.entity_type == "market": - market = Market.query.filter( - Market.name == ea["market_name"] - ).one_or_none() - if market is not None: - return market - else: - raise EntityAddressValidationError( - f"Market with entity address {value} doesn't exist." - ) - elif self.entity_type == "weather_sensor": - weather_sensor = get_weather_sensor_by( - ea["weather_sensor_type_name"], ea["latitude"], ea["longitude"] - ) - if weather_sensor is not None and isinstance( - weather_sensor, WeatherSensor - ): - return weather_sensor - else: - raise EntityAddressValidationError( - f"Weather sensor with entity address {value} doesn't exist." - ) - else: - if self.entity_type == "sensor": sensor = Sensor.query.filter( - Sensor.id == ea["sensor_id"] + Sensor.id == ea["asset_id"] ).one_or_none() if sensor is not None: return sensor - else: - raise EntityAddressValidationError( - f"Sensor with entity address {value} doesn't exist." - ) - elif self.entity_type == "connection": - asset = Asset.query.filter( - Asset.id == ea["sensor_id"] - ).one_or_none() - if asset is not None: - return asset else: raise EntityAddressValidationError( f"Asset with entity address {value} doesn't exist." ) elif self.entity_type == "market": - market = Market.query.filter( - Market.id == ea["sensor_id"] + sensor = Sensor.query.filter( + Sensor.name == ea["market_name"] ).one_or_none() - if market is not None: - return market + if sensor is not None: + return sensor else: raise EntityAddressValidationError( f"Market with entity address {value} doesn't exist." 
) elif self.entity_type == "weather_sensor": - weather_sensor = WeatherSensor.query.filter( - WeatherSensor.id == ea["sensor_id"] - ).one_or_none() - if weather_sensor is not None and isinstance( - weather_sensor, WeatherSensor - ): - return weather_sensor + sensor = get_sensor_by_generic_asset_type_and_location( + ea["weather_sensor_type_name"], ea["latitude"], ea["longitude"] + ) + if sensor is not None: + return sensor else: raise EntityAddressValidationError( f"Weather sensor with entity address {value} doesn't exist." ) + else: + sensor = Sensor.query.filter(Sensor.id == ea["sensor_id"]).one_or_none() + if sensor is not None: + return sensor + else: + raise EntityAddressValidationError( + f"{self.entity_type} with entity address {value} doesn't exist." + ) except EntityAddressException as eae: raise EntityAddressValidationError(str(eae)) return NotImplemented diff --git a/flexmeasures/api/common/schemas/tests/test_sensors.py b/flexmeasures/api/common/schemas/tests/test_sensors.py index e4f22f82f..595016dc3 100644 --- a/flexmeasures/api/common/schemas/tests/test_sensors.py +++ b/flexmeasures/api/common/schemas/tests/test_sensors.py @@ -62,6 +62,9 @@ def test_sensor_field_straightforward( sf = SensorField(entity_type, fm_scheme) deser = sf.deserialize(entity_address, None, None) assert deser.name == exp_deserialization_name + if fm_scheme == "fm0" and entity_type in ("connection", "market", "weather_sensor"): + # These entity types are deserialized to Sensors, which have no entity address under the fm0 scheme + return assert sf.serialize(entity_type, {entity_type: deser}) == entity_address diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index da01ba5ef..3b192cb58 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -14,7 +14,9 @@ from flexmeasures.data import db from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType from flexmeasures.data.models.markets import Price +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.weather import WeatherSensor, Weather from flexmeasures.data.services.time_series import drop_unchanged_beliefs from flexmeasures.data.utils import save_to_session @@ -284,24 +286,26 @@ def asset_replace_name_with_id(connections_as_name: List[str]) -> List[str]: return connections_as_ea -def get_weather_sensor_by( - weather_sensor_type_name: str, latitude: float = 0, longitude: float = 0 -) -> Union[WeatherSensor, ResponseTuple]: +def get_sensor_by_generic_asset_type_and_location( + generic_asset_type_name: str, latitude: float = 0, longitude: float = 0 +) -> Union[Sensor, ResponseTuple]: """ - Search a weather sensor by type and location. - Can create a weather sensor if needed (depends on API mode) + Search a sensor by generic asset type and location. + Can create a sensor if needed (depends on API mode) and then inform the requesting user which one to use. 
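# A hedged usage sketch for the lookup described above (the coordinates are
# made up, and the two outcomes are only indicated):
result = get_sensor_by_generic_asset_type_and_location("wind_speed", latitude=52.0, longitude=4.0)
if isinstance(result, Sensor):
    pass  # a matching Sensor (in play mode, possibly freshly created) to post data to
else:
    pass  # a ResponseTuple telling the client which sensor to use instead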
""" - # Look for the WeatherSensor object - weather_sensor = ( - WeatherSensor.query.filter( - WeatherSensor.weather_sensor_type_name == weather_sensor_type_name - ) - .filter(WeatherSensor.latitude == latitude) - .filter(WeatherSensor.longitude == longitude) + # Look for the Sensor object + sensor = ( + Sensor.query.join(GenericAsset) + .join(GenericAssetType) + .filter(GenericAssetType.name == generic_asset_type_name) + .filter(GenericAsset.generic_asset_type_id == GenericAssetType.id) + .filter(GenericAsset.latitude == latitude) + .filter(GenericAsset.longitude == longitude) + .filter(Sensor.generic_asset_id == GenericAsset.id) .one_or_none() ) - if weather_sensor is None: + if sensor is None: create_sensor_if_unknown = False if current_app.config.get("FLEXMEASURES_MODE", "") == "play": create_sensor_if_unknown = True @@ -311,13 +315,14 @@ def get_weather_sensor_by( current_app.logger.info("CREATING NEW WEATHER SENSOR...") weather_sensor = WeatherSensor( name="Weather sensor for %s at latitude %s and longitude %s" - % (weather_sensor_type_name, latitude, longitude), - weather_sensor_type_name=weather_sensor_type_name, + % (generic_asset_type_name, latitude, longitude), + weather_sensor_type_name=generic_asset_type_name, latitude=latitude, longitude=longitude, ) db.session.add(weather_sensor) db.session.flush() # flush so that we can reference the new object in the current db session + sensor = weather_sensor.corresponding_sensor # or query and return the nearest sensor and let the requesting user post to that one else: @@ -333,7 +338,7 @@ def get_weather_sensor_by( ) else: return unrecognized_sensor() - return weather_sensor + return sensor def save_to_db( diff --git a/flexmeasures/api/common/utils/migration_utils.py b/flexmeasures/api/common/utils/migration_utils.py new file mode 100644 index 000000000..ad306fdd4 --- /dev/null +++ b/flexmeasures/api/common/utils/migration_utils.py @@ -0,0 +1,42 @@ +""" +This module is part of our data model migration (see https://github.com/SeitaBV/flexmeasures/projects/9). +It will become obsolete when we deprecate the fm0 scheme for entity addresses. +""" + +from typing import List, Optional, Union + +from flexmeasures.api.common.responses import ( + deprecated_api_version, + unrecognized_market, + ResponseTuple, +) +from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType +from flexmeasures.data.models.time_series import Sensor + + +def get_sensor_by_unique_name( + sensor_name: str, generic_asset_type_names: Optional[List[str]] = None +) -> Union[Sensor, ResponseTuple]: + """Search a sensor by unique name, returning a ResponseTuple if it is not found. + + Optionally specify a list of generic asset type names to filter on. + This function should be used only for sensors that correspond to the old Market class. + """ + # Look for the Sensor object + query = Sensor.query.filter(Sensor.name == sensor_name) + if generic_asset_type_names is not None: + query = ( + query.join(GenericAsset) + .join(GenericAssetType) + .filter(GenericAssetType.name.in_(generic_asset_type_names)) + .filter(GenericAsset.generic_asset_type_id == GenericAssetType.id) + .filter(Sensor.generic_asset_id == GenericAsset.id) + ) + sensor = query.all() + if len(sensor) == 0: + return unrecognized_market(sensor_name) + elif len(sensor) > 1: + return deprecated_api_version( + f"Multiple sensors were found named {sensor_name}." 
+ ) + return sensor[0] diff --git a/flexmeasures/api/common/utils/validators.py b/flexmeasures/api/common/utils/validators.py index 1cbb1bbd9..25bfb825a 100644 --- a/flexmeasures/api/common/utils/validators.py +++ b/flexmeasures/api/common/utils/validators.py @@ -750,9 +750,9 @@ def post_data_checked_for_required_resolution( entity_type: str, fm_scheme: str ): # noqa: C901 """Decorator which checks that a POST request receives time series data with the event resolutions - required by the sensor (asset). It sets the "resolution" keyword argument. - If the resolution in the data is a multiple of the asset resolution, values are upsampled to the asset resolution. - Finally, this decorator also checks if all assets have the same event_resolution and complains otherwise. + required by the sensor. It sets the "resolution" keyword argument. + If the resolution in the data is a multiple of the sensor resolution, values are upsampled to the sensor resolution. + Finally, this decorator also checks if all sensors have the same event_resolution and complains otherwise. The resolution of the data is inferred from the duration and the number of values. Therefore, the decorator should follow after the values_required, period_required and assets_required decorators. @@ -800,30 +800,30 @@ def decorated_service(*args, **kwargs): (kwargs["start"] + kwargs["duration"]) - kwargs["start"] ) / len(kwargs["value_groups"][0]) - # Finding the required resolution for assets affected in this request + # Finding the required resolution for sensors affected in this request required_resolution = None - last_asset = None + last_sensor = None for asset_group in kwargs["generic_asset_name_groups"]: for asset_descriptor in asset_group: - # Getting the asset - generic_asset = SensorField(entity_type, fm_scheme).deserialize( + # Getting the sensor + sensor = SensorField(entity_type, fm_scheme).deserialize( asset_descriptor ) - if generic_asset is None: + if sensor is None: return unrecognized_asset( f"Failed to look up asset by {asset_descriptor}" ) - # Complain if assets don't all require the same resolution + # Complain if sensors don't all require the same resolution if ( required_resolution is not None - and generic_asset.event_resolution != required_resolution + and sensor.event_resolution != required_resolution ): return conflicting_resolutions( - f"Cannot send data for both {generic_asset} and {last_asset}." + f"Cannot send data for both {sensor} and {last_sensor}." ) - # Setting the resolution & remembering last looked-at asset - required_resolution = generic_asset.event_resolution - last_asset = generic_asset + # Setting the resolution & remembering last looked-at sensor + required_resolution = sensor.event_resolution + last_sensor = sensor # if inferred resolution is a multiple from required_solution, we can upsample_values # todo: next line fails on sensors with 0 resolution @@ -855,12 +855,12 @@ def decorated_service(*args, **kwargs): def get_data_downsampling_allowed(entity_type: str, fm_scheme: str): """Decorator which allows downsampling of data which a GET request returns. It checks for a form parameter "resolution". - If that is given and is a multiple of the asset's event_resolution, + If that is given and is a multiple of the sensor's event_resolution, downsampling is performed on the data. This is done by setting the "resolution" keyword parameter, which is obeyed by collect_time_series_data and used in resampling. - The original resolution of the data is the event_resolution of the asset. 
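A short sketch of the resolution arithmetic the two decorators in this file rely on (helper names are illustrative; the decorators do this inline): the data resolution is inferred from the duration and the number of values, and resampling is only allowed when one resolution is a whole multiple of the other.

    from datetime import timedelta

    def infer_resolution(duration: timedelta, n_values: int) -> timedelta:
        # e.g. a duration of 1 hour with 4 values implies a 15-minute resolution
        return duration / n_values

    def can_resample(data_res: timedelta, sensor_res: timedelta) -> bool:
        # guard against zero-resolution sensors (see the todo above), then
        # check that the data resolution is a whole multiple of the sensor's
        return sensor_res > timedelta(0) and data_res % sensor_res == timedelta(0)

    assert infer_resolution(timedelta(hours=1), 4) == timedelta(minutes=15)
    assert can_resample(timedelta(minutes=30), timedelta(minutes=15))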
+ The original resolution of the data is the event_resolution of the sensor. Therefore, the decorator should follow after the assets_required decorator. Example: @@ -891,19 +891,19 @@ def decorated_service(*args, **kwargs): ds_resolution = parse_duration(form["resolution"]) if ds_resolution is None: return invalid_resolution_str(form["resolution"]) - # Check if the resolution can be applied to all assets (if it is a multiple + # Check if the resolution can be applied to all sensors (if it is a multiple # of the event_resolution(s) and thus downsampling is possible) for asset_group in kwargs["generic_asset_name_groups"]: for asset_descriptor in asset_group: - generic_asset = SensorField(entity_type, fm_scheme).deserialize( + sensor = SensorField(entity_type, fm_scheme).deserialize( asset_descriptor ) - if generic_asset is None: + if sensor is None: return unrecognized_asset() - asset_resolution = generic_asset.event_resolution - if ds_resolution % asset_resolution != timedelta(minutes=0): + sensor_resolution = sensor.event_resolution + if ds_resolution % sensor_resolution != timedelta(minutes=0): return unapplicable_resolution( - f"{isodate.duration_isoformat(asset_resolution)} or a multiple hereof." + f"{isodate.duration_isoformat(sensor_resolution)} or a multiple hereof." ) kwargs["resolution"] = to_offset( isodate.parse_duration(form["resolution"]) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 51c0df6cb..251acc1c1 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -11,9 +11,10 @@ parse_entity_address, EntityAddressException, ) -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.services.resources import get_assets +from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.api.common.responses import ( invalid_domain, @@ -162,13 +163,11 @@ def collect_connection_and_value_groups( Returns value sign in accordance with USEF specs (with negative production and positive consumption). 
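A worked sign example (not in the patch): the API speaks USEF (consumption positive, production negative), while Power values are stored with the opposite sign, so values are negated at the API boundary, as done inline further below.

    def usef_to_internal(value_in_mw: float) -> float:
        # reverse sign for FlexMeasures specs with positive production
        # and negative consumption
        return value_in_mw * -1

    assert usef_to_internal(0.5) == -0.5  # 0.5 MW consumption
    assert usef_to_internal(-1.2) == 1.2  # 1.2 MW production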
""" - from flask import current_app - current_app.logger.info("GETTING") - user_assets = get_assets() - if not user_assets: + user_sensors = get_sensors() + if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") - user_asset_ids = [asset.id for asset in user_assets] + user_sensor_ids = [sensor.id for sensor in user_sensors] end = start + duration value_groups = [] @@ -177,8 +176,8 @@ def collect_connection_and_value_groups( ) # Each connection in the old connection groups will be interpreted as a separate group for connections in connection_groups: - # Get the asset names - asset_names: List[str] = [] + # Get the sensor names + sensor_names: List[str] = [] for connection in connections: # Parse the entity address @@ -188,20 +187,20 @@ def collect_connection_and_value_groups( ) except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = connection_details["asset_id"] + sensor_id = connection_details["asset_id"] - # Look for the Asset object - if asset_id in user_asset_ids: - asset = Asset.query.filter(Asset.id == asset_id).one_or_none() + # Look for the Sensor object + if sensor_id in user_sensor_ids: + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() else: current_app.logger.warning("Cannot identify connection %s" % connection) return unrecognized_connection_group() - asset_names.append(asset.name) + sensor_names.append(sensor.name) # Get the power values # TODO: fill NaN for non-existing values power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect( - old_sensor_names=asset_names, + old_sensor_names=sensor_names, query_window=(start, end), resolution=resolution, belief_horizon_window=belief_horizon_window, @@ -245,10 +244,10 @@ def create_connection_and_value_groups( # noqa: C901 current_app.logger.info("POSTING POWER DATA") data_source = get_or_create_source(current_user) - user_assets = get_assets() - if not user_assets: + user_sensors = get_sensors() + if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") - user_asset_ids = [asset.id for asset in user_assets] + user_sensor_ids = [sensor.id for sensor in user_sensors] power_measurements = [] forecasting_jobs = [] for connection_group, value_group in zip(generic_asset_name_groups, value_groups): @@ -262,26 +261,30 @@ def create_connection_and_value_groups( # noqa: C901 ) except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = connection["asset_id"] + sensor_id = connection["asset_id"] - # Look for the Asset object - if asset_id in user_asset_ids: - asset = Asset.query.filter(Asset.id == asset_id).one_or_none() + # Look for the Sensor object + if sensor_id in user_sensor_ids: + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() else: current_app.logger.warning("Cannot identify connection %s" % connection) return unrecognized_connection_group() # Validate the sign of the values (following USEF specs with positive consumption and negative production) - if asset.is_pure_consumer and any(v < 0 for v in value_group): + if sensor.get_attribute("is_pure_consumer") and any( + v < 0 for v in value_group + ): extra_info = ( "Connection %s is registered as a pure consumer and can only receive non-negative values." 
- % asset.entity_address + % sensor.entity_address ) return power_value_too_small(extra_info) - elif asset.is_pure_producer and any(v > 0 for v in value_group): + elif sensor.get_attribute("is_pure_producer") and any( + v > 0 for v in value_group + ): extra_info = ( "Connection %s is registered as a pure producer and can only receive non-positive values." - % asset.entity_address + % sensor.entity_address ) return power_value_too_big(extra_info) @@ -299,7 +302,7 @@ def create_connection_and_value_groups( # noqa: C901 value=value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption horizon=h, - asset_id=asset.id, + asset_id=sensor_id, data_source_id=data_source.id, ) power_measurements.append(p) @@ -307,11 +310,11 @@ def create_connection_and_value_groups( # noqa: C901 # make forecasts, but only if the sent-in values are not forecasts themselves if horizon <= timedelta( hours=0 - ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset + ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( "Power", - asset_id, + sensor_id, start, start + duration, resolution=duration / len(value_group), diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 31f38f320..0c5e0d3da 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -38,6 +38,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices for asset_name in asset_names: asset = Asset( name=asset_name, + owner_id=test_anonymous_user.id, asset_type_name="test-type", event_resolution=timedelta(minutes=15), capacity_in_mw=1, @@ -45,7 +46,6 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices longitude=100, unit="MW", ) - asset.owner = test_anonymous_user assets.append(asset) db.session.add(asset) @@ -66,16 +66,16 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices for asset_name in asset_names: asset = Asset( name=asset_name, + owner_id=test_user.id, asset_type_name="test-type", - event_resolution=timedelta(minutes=15), + event_resolution=timedelta(minutes=15) + if not asset_name == "CS 4" + else timedelta(hours=1), capacity_in_mw=1, latitude=100, longitude=100, unit="MW", ) - asset.owner = test_user - if asset_name == "CS 4": - asset.event_resolution = timedelta(hours=1) assets.append(asset) db.session.add(asset) @@ -128,15 +128,15 @@ def setup_fresh_api_test_data(fresh_db, setup_roles_users_fresh_db): for asset_name in asset_names: asset = Asset( name=asset_name, + owner_id=test_user.id, asset_type_name="test-type", - event_resolution=timedelta(minutes=15), + event_resolution=timedelta(minutes=15) + if not asset_name == "CS 4" + else timedelta(hours=1), capacity_in_mw=1, latitude=100, longitude=100, unit="MW", ) - asset.owner = test_user - if asset_name == "CS 4": - asset.event_resolution = timedelta(hours=1) assets.append(asset) db.session.add(asset) diff --git a/flexmeasures/api/v1/tests/test_api_v1.py b/flexmeasures/api/v1/tests/test_api_v1.py index 5f62b80e0..875a3f3d9 100644 --- a/flexmeasures/api/v1/tests/test_api_v1.py +++ b/flexmeasures/api/v1/tests/test_api_v1.py @@ -21,7 +21,7 @@ verify_power_in_db, ) from flexmeasures.auth.error_handling import UNAUTH_ERROR_STATUS -from flexmeasures.data.models.assets import Asset +from flexmeasures.data.models.time_series import 
Sensor @pytest.mark.parametrize("query", [{}, {"access": "Prosumer"}]) @@ -231,7 +231,7 @@ def test_get_meter_data(db, app, client, message): ).sort_index() # check whether conftest.py did its job setting up the database with expected values - cs_5 = Asset.query.filter(Asset.name == "CS 5").one_or_none() + cs_5 = Sensor.query.filter(Sensor.name == "CS 5").one_or_none() verify_power_in_db(message, cs_5, expected_values, db, swapped_sign=True) # check whether the API returns the expected values (currently only the Prosumer data is returned) diff --git a/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py b/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py index e92749927..127ca2c82 100644 --- a/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py +++ b/flexmeasures/api/v1/tests/test_api_v1_fresh_db.py @@ -12,7 +12,7 @@ message_for_get_meter_data, count_connections_in_post_message, ) -from flexmeasures.data.models.assets import Asset +from flexmeasures.data.models.time_series import Sensor @pytest.mark.parametrize( @@ -72,8 +72,8 @@ def test_post_and_get_meter_data( assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon for asset_name in ("CS 1", "CS 2", "CS 3"): if asset_name in str(post_message): - asset = Asset.query.filter_by(name=asset_name).one_or_none() - assert asset.id in [job.kwargs["old_sensor_id"] for job in jobs] + sensor = Sensor.query.filter_by(name=asset_name).one_or_none() + assert sensor.id in [job.kwargs["old_sensor_id"] for job in jobs] # get meter data get_meter_data_response = client.get( diff --git a/flexmeasures/api/v1/tests/utils.py b/flexmeasures/api/v1/tests/utils.py index bf3735ac3..560d3d0f3 100644 --- a/flexmeasures/api/v1/tests/utils.py +++ b/flexmeasures/api/v1/tests/utils.py @@ -7,7 +7,8 @@ import pandas as pd from flexmeasures.api.common.utils.validators import validate_user_sources -from flexmeasures.data.models.assets import Power, Asset +from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.time_series import Sensor def message_for_get_meter_data( @@ -107,7 +108,7 @@ def count_connections_in_post_message(message: dict) -> int: def verify_power_in_db( - message, asset, expected_df: pd.DataFrame, db, swapped_sign: bool = False + message, sensor: Sensor, expected_df: pd.DataFrame, db, swapped_sign: bool = False ): """util method to verify that power data ended up in the database""" # todo: combine with verify_prices_in_db (in v1_1 utils) into a single function (NB different horizon filters) @@ -116,13 +117,16 @@ def verify_power_in_db( horizon = ( parse_duration(message["horizon"]) if "horizon" in message else timedelta(0) ) - resolution = asset.event_resolution + resolution = sensor.event_resolution query = ( db.session.query(Power.datetime, Power.value, Power.data_source_id) .filter((Power.datetime > start - resolution) & (Power.datetime < end)) .filter(Power.horizon == horizon) - .join(Asset) - .filter(Asset.name == asset.name) + .join( + Asset, Sensor + ) # we still need to join Asset, because Power.asset_id is still coupled to Asset rather than Sensor; see https://github.com/SeitaBV/flexmeasures/issues/252 + .filter(Power.asset_id == Sensor.id) + .filter(Sensor.name == sensor.name) ) if "source" in message: source_ids = validate_user_sources(message["source"]) diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index a99b26811..3c44b6c32 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -12,13 +12,13 @@ from 
flexmeasures.api.common.responses import ( invalid_domain, invalid_unit, - unrecognized_market, ResponseTuple, invalid_horizon, ) from flexmeasures.api.common.utils.api_utils import ( save_to_db, ) +from flexmeasures.api.common.utils.migration_utils import get_sensor_by_unique_name from flexmeasures.api.common.utils.validators import ( type_accepted, units_accepted, @@ -37,26 +37,28 @@ collect_connection_and_value_groups, create_connection_and_value_groups, ) -from flexmeasures.api.common.utils.api_utils import get_weather_sensor_by +from flexmeasures.api.common.utils.api_utils import ( + get_sensor_by_generic_asset_type_and_location, +) from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.weather import Weather -from flexmeasures.data.services.resources import get_assets +from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import create_forecasting_jobs @as_json def get_connection_response(): - # Look up Asset objects - user_assets = get_assets() + # Look up Sensor objects + user_sensors = get_sensors() # Return entity addresses of assets - message = dict(connections=[asset.entity_address for asset in user_assets]) + message = dict(connections=[sensor.entity_address for sensor in user_sensors]) if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - message["names"] = [asset.name for asset in user_assets] + message["names"] = [sensor.name for sensor in user_sensors] else: - message["names"] = [asset.display_name for asset in user_assets] + message["names"] = [sensor.display_name for sensor in user_sensors] return message @@ -94,12 +96,13 @@ def post_price_data_response( return invalid_domain(str(eae)) market_name = ea["market_name"] - # Look for the Market object - market = Market.query.filter(Market.name == market_name).one_or_none() - if market is None: - return unrecognized_market(market_name) - elif unit != market.unit: - return invalid_unit("%s prices" % market.display_name, [market.unit]) + # Look for the Sensor object + sensor = get_sensor_by_unique_name(market_name, ["day_ahead", "tou_tariff"]) + if type(sensor) == ResponseTuple: + # Error message telling the user what to do + return sensor + if unit != sensor.unit: + return invalid_unit("%s prices" % sensor.name, [sensor.unit]) # Create new Price objects for j, value in enumerate(value_group): @@ -114,7 +117,7 @@ def post_price_data_response( datetime=dt, value=value, horizon=h, - market_id=market.id, + market_id=sensor.id, data_source_id=data_source.id, ) prices.append(p) @@ -125,7 +128,7 @@ def post_price_data_response( # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( "Price", - market.id, + sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, resolution=duration / len(value_group), @@ -178,12 +181,12 @@ def post_weather_data_response( # noqa: C901 if unit not in accepted_units: return invalid_unit(weather_sensor_type_name, accepted_units) - weather_sensor = get_weather_sensor_by( + sensor = get_sensor_by_generic_asset_type_and_location( weather_sensor_type_name, latitude, longitude ) - if type(weather_sensor) == ResponseTuple: + if type(sensor) == ResponseTuple: # Error message telling the user about the nearest weather sensor they can post to - return weather_sensor + return sensor # Create new Weather 
objects for j, value in enumerate(value_group): @@ -198,7 +201,7 @@ def post_weather_data_response( # noqa: C901 datetime=dt, value=value, horizon=h, - sensor_id=weather_sensor.id, + sensor_id=sensor.id, data_source_id=data_source.id, ) weather_measurements.append(w) @@ -208,11 +211,11 @@ def post_weather_data_response( # noqa: C901 "FLEXMEASURES_MODE", "" ) != "play" and horizon <= timedelta( hours=0 - ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset + ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( "Weather", - weather_sensor.id, + sensor.id, start, start + duration, resolution=duration / len(value_group), diff --git a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index 0ccaa871c..79ac409c9 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -39,6 +39,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices for asset_name in asset_names: asset = Asset( name=asset_name, + owner_id=test_user.id, asset_type_name="test-type", event_resolution=timedelta(minutes=15), capacity_in_mw=1, @@ -46,7 +47,6 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices longitude=100, unit="MW", ) - asset.owner = test_user assets.append(asset) db.session.add(asset) diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1.py b/flexmeasures/api/v1_1/tests/test_api_v1_1.py index 6d2fbd9cf..a97131ba9 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1.py @@ -24,7 +24,7 @@ from flexmeasures.auth.error_handling import UNAUTH_ERROR_STATUS from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.user import User -from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor @pytest.mark.parametrize("query", [{}, {"access": "Prosumer"}]) @@ -178,10 +178,10 @@ def test_post_price_data_invalid_unit(setup_api_test_data, client, post_message) assert post_price_data_response.json["type"] == "PostPriceDataResponse" ea = parse_entity_address(post_message["market"], "market", fm_scheme="fm0") market_name = ea["market_name"] - market = Market.query.filter_by(name=market_name).one_or_none() + sensor = Sensor.query.filter_by(name=market_name).one_or_none() assert ( post_price_data_response.json["message"] - == invalid_unit("%s prices" % market.display_name, ["EUR/MWh"])[0]["message"] + == invalid_unit("%s prices" % sensor.name, ["EUR/MWh"])[0]["message"] ) diff --git a/flexmeasures/api/v1_1/tests/utils.py b/flexmeasures/api/v1_1/tests/utils.py index ab47b7cc9..8473dfece 100644 --- a/flexmeasures/api/v1_1/tests/utils.py +++ b/flexmeasures/api/v1_1/tests/utils.py @@ -10,6 +10,7 @@ from flexmeasures.api.common.schemas.sensors import SensorField from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.time_series import Sensor def message_for_get_prognosis( @@ -153,14 +154,17 @@ def verify_prices_in_db(post_message, values, db, swapped_sign: bool = False): start = parse_datetime(post_message["start"]) end = start + parse_duration(post_message["duration"]) horizon = parse_duration(post_message["horizon"]) - market = SensorField("market", "fm0").deserialize(post_message["market"]) - resolution = market.event_resolution + sensor = SensorField("market", 
"fm0").deserialize(post_message["market"]) + resolution = sensor.event_resolution query = ( db.session.query(Price.value, Price.horizon) .filter((Price.datetime > start - resolution) & (Price.datetime < end)) .filter(Price.horizon == horizon - (end - (Price.datetime + resolution))) - .join(Market) - .filter(Market.name == market.name) + .join( + Market, Sensor + ) # we still need to join Market, because Price.market_id is still coupled to Market rather than Sensor; see https://github.com/SeitaBV/flexmeasures/issues/252 + .filter(Price.market_id == Sensor.id) + .filter(Sensor.name == sensor.name) ) df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] diff --git a/flexmeasures/api/v1_2/implementations.py b/flexmeasures/api/v1_2/implementations.py index 85cc03bb9..350f93a73 100644 --- a/flexmeasures/api/v1_2/implementations.py +++ b/flexmeasures/api/v1_2/implementations.py @@ -1,4 +1,4 @@ -from datetime import timedelta +from datetime import datetime, timedelta import isodate from flask_json import as_json @@ -32,12 +32,12 @@ parse_isodate_str, ) from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.exceptions import ( UnknownMarketException, UnknownPricesException, ) +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.services.resources import has_assets, can_access_asset @@ -69,35 +69,39 @@ def get_device_message_response(generic_asset_name_groups, duration): ea = parse_entity_address(event, entity_type="event", fm_scheme="fm0") except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = ea["asset_id"] + sensor_id = ea["asset_id"] event_id = ea["event_id"] event_type = ea["event_type"] - # Look for the Asset object - asset = Asset.query.filter(Asset.id == asset_id).one_or_none() - if asset is None or not can_access_asset(asset): + # Look for the Sensor object + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() + if sensor is None or not can_access_asset(sensor): current_app.logger.warning( - "Cannot identify asset %s given the event." % event + "Cannot identify sensor given the event %s." % event ) return unrecognized_connection_group() - if asset.asset_type_name != "battery": + if sensor.generic_asset.generic_asset_type.name != "battery": return invalid_domain( - "API version 1.2 only supports device messages for batteries. Asset ID:%s is not a battery." - % asset_id + "API version 1.2 only supports device messages for batteries. " + "Sensor ID:%s does not belong to a battery." 
% sensor_id ) - if event_type != "soc" or event_id != asset.soc_udi_event_id: + if event_type != "soc" or event_id != sensor.generic_asset.get_attribute( + "soc_udi_event_id" + ): return unrecognized_event(event_id, event_type) - start = asset.soc_datetime - resolution = asset.event_resolution + start = datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ) + resolution = sensor.event_resolution # Schedule the asset try: schedule = schedule_battery( - asset.corresponding_sensor, + sensor, start, start + planning_horizon, resolution, - soc_at_start=asset.soc_in_mwh, + soc_at_start=sensor.generic_asset.get_attribute("soc_in_mwh"), prefer_charging_sooner=False, ) except UnknownPricesException: @@ -158,40 +162,47 @@ def post_udi_event_response(unit): # noqa: C901 except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = ea["asset_id"] + sensor_id = ea["asset_id"] event_id = ea["event_id"] event_type = ea["event_type"] if event_type != "soc": return unrecognized_event(event_id, event_type) - # get asset - asset: Asset = Asset.query.filter_by(id=asset_id).one_or_none() - if asset is None or not can_access_asset(asset): - current_app.logger.warning("Cannot identify asset via %s." % ea) + # Look for the Sensor object + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() + if sensor is None or not can_access_asset(sensor): + current_app.logger.warning("Cannot identify sensor via %s." % ea) return unrecognized_connection_group() - if asset.asset_type_name != "battery": + if sensor.generic_asset.generic_asset_type.name != "battery": return invalid_domain( - "API version 1.2 only supports UDI events for batteries. Asset ID:%s is not a battery." - % asset_id + "API version 1.2 only supports UDI events for batteries. " + "Sensor ID:%s does not belong to a battery." % sensor_id ) # unless on play, keep events ordered by entry date and ID if current_app.config.get("FLEXMEASURES_MODE") != "play": # do not allow new date to be after last date - if asset.soc_datetime is not None: - if asset.soc_datetime >= datetime: - msg = ( - "The date of the requested UDI event (%s) is earlier than the latest known date (%s)." - % (datetime, asset.soc_datetime) - ) - current_app.logger.warning(msg) - return invalid_datetime(msg) + if ( + isinstance(sensor.generic_asset.get_attribute("soc_datetime"), str) + and datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ) + >= datetime + ): + msg = "The date of the requested UDI event (%s) is earlier than the latest known date (%s)." 
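Because the soc_* fields move from dedicated Asset columns into the GenericAsset's JSON attributes, datetimes must round-trip through ISO 8601 strings; a minimal sketch of the pattern used above and below, assuming a sensor variable is in scope:

    from datetime import datetime

    # writing: serialize before storing in the JSON attributes column
    sensor.generic_asset.set_attribute(
        "soc_datetime", datetime(2018, 9, 27, 10).isoformat()
    )

    # reading: parse back, guarding for assets that never stored a value
    stored = sensor.generic_asset.get_attribute("soc_datetime")
    last_soc_datetime = datetime.fromisoformat(stored) if isinstance(stored, str) else None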
% ( + datetime, + datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ), + ) + current_app.logger.warning(msg) + return invalid_datetime(msg) # check if udi event id is higher than existing - if asset.soc_udi_event_id is not None: - if asset.soc_udi_event_id >= event_id: - return outdated_event_id(event_id, asset.soc_udi_event_id) + soc_udi_event_id = sensor.generic_asset.get_attribute("soc_udi_event_id") + if soc_udi_event_id is not None and soc_udi_event_id >= event_id: + return outdated_event_id(event_id, soc_udi_event_id) # get value if "value" not in form: @@ -200,10 +211,10 @@ def post_udi_event_response(unit): # noqa: C901 if unit == "kWh": value = value / 1000.0 - # store new soc in asset - asset.soc_datetime = datetime - asset.soc_udi_event_id = event_id - asset.soc_in_mwh = value + # Store new soc info as GenericAsset attributes + sensor.generic_asset.set_attribute("soc_datetime", datetime.isoformat()) + sensor.generic_asset.set_attribute("soc_udi_event_id", event_id) + sensor.generic_asset.set_attribute("soc_in_mwh", value) db.session.commit() return request_processed("Request has been processed.") diff --git a/flexmeasures/api/v1_2/tests/test_api_v1_2.py b/flexmeasures/api/v1_2/tests/test_api_v1_2.py index 6f47b1e65..4d66b821b 100644 --- a/flexmeasures/api/v1_2/tests/test_api_v1_2.py +++ b/flexmeasures/api/v1_2/tests/test_api_v1_2.py @@ -9,13 +9,13 @@ message_for_get_device_message, message_for_post_udi_event, ) -from flexmeasures.data.models.assets import Asset +from flexmeasures.data.models.time_series import Sensor @pytest.mark.parametrize("message", [message_for_get_device_message()]) def test_get_device_message(client, message): - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_2.get_device_message"), @@ -57,8 +57,8 @@ def test_get_device_message(client, message): def test_get_device_message_mistyped_duration(client): auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") message = message_for_get_device_message() - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id message["duration"] = "PTT6H" get_device_message_response = client.get( url_for("flexmeasures_api_v1_2.get_device_message"), @@ -75,8 +75,8 @@ def test_get_device_message_mistyped_duration(client): @pytest.mark.parametrize("message", [message_for_get_device_message(wrong_id=True)]) def test_get_device_message_wrong_event_id(client, message): - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_2.get_device_message"), @@ -96,10 +96,10 @@ def test_get_device_message_wrong_event_id(client, message): "message", 
[message_for_get_device_message(unknown_prices=True)] ) def test_get_device_message_unknown_prices(client, message): - asset = Asset.query.filter( - Asset.name == "Test battery with no known prices" + sensor = Sensor.query.filter( + Sensor.name == "Test battery with no known prices" ).one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_2.get_device_message"), @@ -116,8 +116,8 @@ def test_get_device_message_unknown_prices(client, message): def test_post_udi_event(app, message): auth_token = None with app.test_client() as client: - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") post_udi_event_response = client.post( url_for("flexmeasures_api_v1_2.post_udi_event"), @@ -128,17 +128,17 @@ def test_post_udi_event(app, message): assert post_udi_event_response.status_code == 200 assert post_udi_event_response.json["type"] == "PostUdiEventResponse" - msg_dt = parse_datetime(message["datetime"]) + msg_dt = message["datetime"] # test database state - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - assert asset.soc_datetime == msg_dt - assert asset.soc_in_mwh == message["value"] / 1000 - assert asset.soc_udi_event_id == 204 + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + assert sensor.generic_asset.get_attribute("soc_datetime") == msg_dt + assert sensor.generic_asset.get_attribute("soc_in_mwh") == message["value"] / 1000 + assert sensor.generic_asset.get_attribute("soc_udi_event_id") == 204 # sending again results in an error, unless we increase the event ID with app.test_client() as client: - next_msg_dt = msg_dt + timedelta(minutes=5) + next_msg_dt = parse_datetime(msg_dt) + timedelta(minutes=5) message["datetime"] = next_msg_dt.strftime("%Y-%m-%dT%H:%M:%S.%f%z") post_udi_event_response = client.post( url_for("flexmeasures_api_v1_2.post_udi_event"), @@ -161,7 +161,9 @@ def test_post_udi_event(app, message): assert post_udi_event_response.json["type"] == "PostUdiEventResponse" # test database state - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - assert asset.soc_datetime == next_msg_dt - assert asset.soc_in_mwh == message["value"] / 1000 - assert asset.soc_udi_event_id == 205 + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + assert parse_datetime( + sensor.generic_asset.get_attribute("soc_datetime") + ) == parse_datetime(message["datetime"]) + assert sensor.generic_asset.get_attribute("soc_in_mwh") == message["value"] / 1000 + assert sensor.generic_asset.get_attribute("soc_udi_event_id") == 205 diff --git a/flexmeasures/api/v1_2/tests/utils.py b/flexmeasures/api/v1_2/tests/utils.py index 5ba6272b7..34ec71a8c 100644 --- a/flexmeasures/api/v1_2/tests/utils.py +++ b/flexmeasures/api/v1_2/tests/utils.py @@ -7,10 +7,10 @@ def message_for_get_device_message( message = { "type": "GetDeviceMessageRequest", "duration": "PT48H", - "event": "ea1.2018-06.localhost:%s:%s:203:soc", + "event": "ea1.2018-06.localhost:%s:203:soc", } if wrong_id: - message["event"] = 
"ea1.2018-06.localhost:%s:%s:9999:soc" + message["event"] = "ea1.2018-06.localhost:%s:9999:soc" if unknown_prices: message[ "duration" @@ -21,7 +21,7 @@ def message_for_get_device_message( def message_for_post_udi_event() -> dict: message = { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:%s:%s:204:soc", + "event": "ea1.2018-06.io.flexmeasures.company:%s:204:soc", "datetime": "2018-09-27T10:00:00+00:00", "value": 12.1, "unit": "kWh", diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 2efa29ef8..cef4539df 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -1,5 +1,5 @@ # flake8: noqa: C901 -from datetime import timedelta +from datetime import datetime, timedelta import inflect import isodate @@ -39,8 +39,9 @@ parse_isodate_str, ) from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.services.resources import has_assets, can_access_asset from flexmeasures.data.services.scheduling import create_scheduling_job @@ -72,21 +73,25 @@ def get_device_message_response(generic_asset_name_groups, duration): ea = parse_entity_address(event, entity_type="event", fm_scheme="fm0") except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = ea["asset_id"] + sensor_id = ea["asset_id"] event_id = ea["event_id"] event_type = ea["event_type"] - # Look for the Asset object - asset = Asset.query.filter(Asset.id == asset_id).one_or_none() - if asset is None or not can_access_asset(asset): + # Look for the Sensor object + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() + if sensor is None or not can_access_asset(sensor): current_app.logger.warning( - "Cannot identify asset %s given the event." % event + "Cannot identify sensor given the event %s." % event ) return unrecognized_connection_group() - if asset.asset_type_name not in ("battery", "one-way_evse", "two-way_evse"): + if sensor.generic_asset.generic_asset_type.name not in ( + "battery", + "one-way_evse", + "two-way_evse", + ): return invalid_domain( f"API version 1.3 only supports device messages for batteries and Electric Vehicle Supply Equipment (EVSE). " - f"Asset ID:{asset_id} is not a battery or EVSE, but {p.a(asset.asset_type.display_name)}." + f"Sensor ID:{sensor_id} does not belong to a battery or EVSE, but {p.a(sensor.generic_asset.generic_asset_type.description)}." 
) # Use the event_id to look up the schedule start @@ -95,9 +100,11 @@ def get_device_message_response(generic_asset_name_groups, duration): connection = current_app.queues["scheduling"].connection try: # First try the scheduling queue job = Job.fetch(event, connection=connection) - except NoSuchJobError: # Then try the most recent event_id (stored as an asset attribute) - if event_id == asset.soc_udi_event_id: - schedule_start = asset.soc_datetime + except NoSuchJobError: # Then try the most recent event_id (stored as a generic asset attribute) + if event_id == sensor.generic_asset.get_attribute("soc_udi_event_id"): + schedule_start = datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ) message = ( "Your UDI event is the most recent event for this device, but " ) @@ -161,12 +168,12 @@ def get_device_message_response(generic_asset_name_groups, duration): Power.data_source_id, func.min(Power.horizon).label("most_recent_belief_horizon"), ) - .filter(Power.asset_id == asset.id) + .filter(Power.asset_id == sensor_id) .group_by(Power.datetime, Power.data_source_id) .subquery() ) power_values = ( - Power.query.filter(Power.asset_id == asset.id) + Power.query.filter(Power.asset_id == sensor_id) .filter(Power.data_source_id == scheduler_source.id) .filter(Power.datetime >= schedule_start) .filter(Power.datetime < schedule_start + planning_horizon) @@ -191,7 +198,7 @@ def get_device_message_response(generic_asset_name_groups, duration): ) # Update the planning window - resolution = asset.event_resolution + resolution = sensor.event_resolution start = consumption_schedule.index[0] duration = min( duration, consumption_schedule.index[-1] + resolution - start @@ -247,40 +254,51 @@ def post_udi_event_response(unit): except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = ea["asset_id"] + sensor_id = ea["asset_id"] event_id = ea["event_id"] event_type = ea["event_type"] if event_type not in ("soc", "soc-with-targets"): return unrecognized_event_type(event_type) - # get asset - asset: Asset = Asset.query.filter_by(id=asset_id).one_or_none() - if asset is None or not can_access_asset(asset): - current_app.logger.warning("Cannot identify asset via %s." % ea) + # Look for the Sensor object + sensor = Sensor.query.filter_by(id=sensor_id).one_or_none() + if sensor is None or not can_access_asset(sensor): + current_app.logger.warning("Cannot identify sensor via %s." % ea) return unrecognized_connection_group() - if asset.asset_type_name not in ("battery", "one-way_evse", "two-way_evse"): + if sensor.generic_asset.generic_asset_type.name not in ( + "battery", + "one-way_evse", + "two-way_evse", + ): return invalid_domain( f"API version 1.3 only supports UDI events for batteries and Electric Vehicle Supply Equipment (EVSE). " - f"Asset ID:{asset_id} is not a battery or EVSE, but {p.a(asset.asset_type.display_name)}." + f"Sensor ID:{sensor_id} does not belong to a battery or EVSE, but {p.a(sensor.generic_asset.generic_asset_type.description)}." ) # unless on play, keep events ordered by entry date and ID if current_app.config.get("FLEXMEASURES_MODE") != "play": # do not allow new date to precede previous date - if asset.soc_datetime is not None: - if datetime < asset.soc_datetime: - msg = ( - "The date of the requested UDI event (%s) is earlier than the latest known date (%s)." 
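A sketch of the two-step schedule lookup described above, using the rq API the surrounding code already imports; event, connection, event_id and sensor are assumed in scope, with names mirroring the patch:

    from datetime import datetime
    from rq.job import Job
    from rq.exceptions import NoSuchJobError

    try:
        # recent schedules can still be found in the scheduling queue by job id
        job = Job.fetch(event, connection=connection)
    except NoSuchJobError:
        # otherwise fall back to the most recent UDI event,
        # now kept as generic asset attributes
        if event_id == sensor.generic_asset.get_attribute("soc_udi_event_id"):
            schedule_start = datetime.fromisoformat(
                sensor.generic_asset.get_attribute("soc_datetime")
            )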
- % (datetime, asset.soc_datetime) - ) - current_app.logger.warning(msg) - return invalid_datetime(msg) + if isinstance( + sensor.generic_asset.get_attribute("soc_datetime"), str + ) and datetime < datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ): + msg = "The date of the requested UDI event (%s) is earlier than the latest known date (%s)." % ( + datetime, + datetime.fromisoformat( + sensor.generic_asset.get_attribute("soc_datetime") + ), + ) + current_app.logger.warning(msg) + return invalid_datetime(msg) # check if udi event id is higher than existing - if asset.soc_udi_event_id is not None: - if asset.soc_udi_event_id >= event_id: - return outdated_event_id(event_id, asset.soc_udi_event_id) + if sensor.generic_asset.get_attribute("soc_udi_event_id") is not None: + if sensor.generic_asset.get_attribute("soc_udi_event_id") >= event_id: + return outdated_event_id( + event_id, sensor.generic_asset.get_attribute("soc_udi_event_id") + ) # get value if "value" not in form: @@ -297,7 +315,7 @@ def post_udi_event_response(unit): # set soc targets start_of_schedule = datetime end_of_schedule = datetime + current_app.config.get("FLEXMEASURES_PLANNING_HORIZON") - resolution = asset.event_resolution + resolution = sensor.event_resolution soc_targets = pd.Series( np.nan, index=pd.date_range( @@ -356,7 +374,7 @@ def post_udi_event_response(unit): soc_targets.loc[target_datetime] = target_value create_scheduling_job( - asset.id, + sensor_id, start_of_schedule, end_of_schedule, resolution=resolution, @@ -367,10 +385,10 @@ def post_udi_event_response(unit): enqueue=True, ) - # store new soc in asset - asset.soc_datetime = datetime - asset.soc_udi_event_id = event_id - asset.soc_in_mwh = value + # Store new soc info as GenericAsset attributes + sensor.generic_asset.set_attribute("soc_datetime", datetime.isoformat()) + sensor.generic_asset.set_attribute("soc_udi_event_id", event_id) + sensor.generic_asset.set_attribute("soc_in_mwh", value) db.session.commit() return request_processed() diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index 045c305d0..b8c501f4c 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -12,8 +12,9 @@ message_for_get_device_message, message_for_post_udi_event, ) -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.scheduling import handle_scheduling_exception from flexmeasures.utils.calculations import integrate_time_series @@ -21,8 +22,8 @@ @pytest.mark.parametrize("message", [message_for_get_device_message(wrong_id=True)]) def test_get_device_message_wrong_event_id(client, message): - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_3.get_device_message"), @@ -50,10 +51,8 @@ def test_post_udi_event_and_get_device_message( ): auth_token = None with app.test_client() as client: - asset = Asset.query.filter(Asset.name == 
asset_name).one_or_none() - asset_id = asset.id - asset_owner_id = asset.owner_id - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == asset_name).one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") post_udi_event_response = client.post( url_for("flexmeasures_api_v1_3.post_udi_event"), @@ -64,19 +63,19 @@ def test_post_udi_event_and_get_device_message( assert post_udi_event_response.status_code == 200 assert post_udi_event_response.json["type"] == "PostUdiEventResponse" - # test asset state in database - msg_dt = parse_datetime(message["datetime"]) - asset = Asset.query.filter(Asset.name == asset_name).one_or_none() - assert asset.soc_datetime == msg_dt - assert asset.soc_in_mwh == message["value"] / 1000 - assert asset.soc_udi_event_id == 204 + # test database state + msg_dt = message["datetime"] + sensor = Sensor.query.filter(Sensor.name == asset_name).one_or_none() + assert sensor.generic_asset.get_attribute("soc_datetime") == msg_dt + assert sensor.generic_asset.get_attribute("soc_in_mwh") == message["value"] / 1000 + assert sensor.generic_asset.get_attribute("soc_udi_event_id") == 204 # look for scheduling jobs in queue assert ( len(app.queues["scheduling"]) == 1 ) # only 1 schedule should be made for 1 asset job = app.queues["scheduling"].jobs[0] - assert job.kwargs["asset_id"] == asset_id + assert job.kwargs["asset_id"] == sensor.id assert job.kwargs["start"] == parse_datetime(message["datetime"]) assert job.id == message["event"] @@ -98,7 +97,7 @@ def test_post_udi_event_and_get_device_message( scheduler_source is not None ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.asset_id == asset_id) + Power.query.filter(Power.asset_id == sensor.id) .filter(Power.data_source_id == scheduler_source.id) .all() ) @@ -122,10 +121,7 @@ def test_post_udi_event_and_get_device_message( # try to retrieve the schedule through the getDeviceMessage api endpoint get_device_message = message_for_get_device_message() - get_device_message["event"] = get_device_message["event"] % ( - asset_owner_id, - asset_id, - ) + get_device_message["event"] = get_device_message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_3.get_device_message"), @@ -163,7 +159,7 @@ def test_post_udi_event_and_get_device_message( # sending again results in an error, unless we increase the event ID with app.test_client() as client: - next_msg_dt = msg_dt + timedelta(minutes=5) + next_msg_dt = parse_datetime(msg_dt) + timedelta(minutes=5) message["datetime"] = next_msg_dt.strftime("%Y-%m-%dT%H:%M:%S.%f%z") post_udi_event_response = client.post( url_for("flexmeasures_api_v1_3.post_udi_event"), @@ -186,10 +182,12 @@ def test_post_udi_event_and_get_device_message( assert post_udi_event_response.json["type"] == "PostUdiEventResponse" # test database state - asset = Asset.query.filter(Asset.name == asset_name).one_or_none() - assert asset.soc_datetime == next_msg_dt - assert asset.soc_in_mwh == message["value"] / 1000 - assert asset.soc_udi_event_id == 205 + sensor = Sensor.query.filter(Sensor.name == asset_name).one_or_none() + assert parse_datetime( + sensor.generic_asset.get_attribute("soc_datetime") + ) == parse_datetime(message["datetime"]) + assert sensor.generic_asset.get_attribute("soc_in_mwh") == message["value"] / 
1000 + assert sensor.generic_asset.get_attribute("soc_udi_event_id") == 205 # process the scheduling queue work_on_rq(app.queues["scheduling"], exc_handler=handle_scheduling_exception) diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3_fresh_db.py b/flexmeasures/api/v1_3/tests/test_api_v1_3_fresh_db.py index 41de3f191..317288cec 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3_fresh_db.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3_fresh_db.py @@ -9,8 +9,8 @@ message_for_post_udi_event, message_for_get_device_message, ) -from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.services.scheduling import handle_scheduling_exception from flexmeasures.data.tests.utils import work_on_rq @@ -21,10 +21,8 @@ def test_post_udi_event_and_get_device_message_with_unknown_prices( ): auth_token = None with app.test_client() as client: - asset = Asset.query.filter(Asset.name == "Test battery").one_or_none() - asset_id = asset.id - asset_owner_id = asset.owner_id - message["event"] = message["event"] % (asset.owner_id, asset.id) + sensor = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") post_udi_event_response = client.post( url_for("flexmeasures_api_v1_3.post_udi_event"), @@ -40,7 +38,7 @@ def test_post_udi_event_and_get_device_message_with_unknown_prices( len(app.queues["scheduling"]) == 1 ) # only 1 schedule should be made for 1 asset job = app.queues["scheduling"].jobs[0] - assert job.kwargs["asset_id"] == asset_id + assert job.kwargs["asset_id"] == sensor.id assert job.kwargs["start"] == parse_datetime(message["datetime"]) assert job.id == message["event"] assert ( @@ -65,7 +63,7 @@ def test_post_udi_event_and_get_device_message_with_unknown_prices( # try to retrieve the schedule through the getDeviceMessage api endpoint message = message_for_get_device_message() - message["event"] = message["event"] % (asset_owner_id, asset_id) + message["event"] = message["event"] % sensor.id auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") get_device_message_response = client.get( url_for("flexmeasures_api_v1_3.get_device_message"), diff --git a/flexmeasures/api/v1_3/tests/utils.py b/flexmeasures/api/v1_3/tests/utils.py index 3ba13a2ba..3224833d3 100644 --- a/flexmeasures/api/v1_3/tests/utils.py +++ b/flexmeasures/api/v1_3/tests/utils.py @@ -7,10 +7,10 @@ def message_for_get_device_message( message = { "type": "GetDeviceMessageRequest", "duration": "PT48H", - "event": "ea1.2018-06.localhost:%s:%s:204:soc", + "event": "ea1.2018-06.localhost:%s:204:soc", } if wrong_id: - message["event"] = "ea1.2018-06.localhost:%s:%s:9999:soc" + message["event"] = "ea1.2018-06.localhost:%s:9999:soc" if unknown_prices: message[ "duration" @@ -24,7 +24,7 @@ def message_for_post_udi_event( ) -> dict: message = { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.localhost:%s:%s:204:soc", + "event": "ea1.2018-06.localhost:%s:204:soc", "datetime": "2015-01-01T00:00:00+00:00", "value": 12.1, "unit": "kWh", diff --git a/flexmeasures/api/v2_0/implementations/assets.py b/flexmeasures/api/v2_0/implementations/assets.py index 2e0fcbc4a..97a65d19e 100644 --- a/flexmeasures/api/v2_0/implementations/assets.py +++ b/flexmeasures/api/v2_0/implementations/assets.py @@ -157,7 +157,7 @@ def patch(db_asset, asset_data): 
@load_asset(admins_only=True) @as_json def delete(asset): - """Delete a task given its identifier""" + """Delete an asset given its identifier""" asset_name = asset.name db.session.delete(asset) db.session.commit() diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index 25215908e..d96948a66 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -15,7 +15,7 @@ ResponseTuple, ) from flexmeasures.api.common.utils.api_utils import ( - get_weather_sensor_by, + get_sensor_by_generic_asset_type_and_location, save_to_db, determine_belief_timing, ) @@ -31,12 +31,13 @@ period_required, values_required, ) -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.markets import Market, Price from flexmeasures.data.models.weather import Weather from flexmeasures.data.services.forecasting import create_forecasting_jobs -from flexmeasures.data.services.resources import get_assets +from flexmeasures.data.services.resources import get_sensors from flexmeasures.utils.entity_address_utils import ( parse_entity_address, EntityAddressException, @@ -172,13 +173,13 @@ def post_weather_data_response( # noqa: C901 if unit not in accepted_units: return invalid_unit(weather_sensor_type_name, accepted_units) - weather_sensor = get_weather_sensor_by( + sensor = get_sensor_by_generic_asset_type_and_location( weather_sensor_type_name, latitude, longitude ) # Convert to timely-beliefs terminology event_starts, belief_horizons = determine_belief_timing( - event_values, start, resolution, horizon, prior, weather_sensor + event_values, start, resolution, horizon, prior, sensor ) # Create new Weather objects @@ -188,7 +189,7 @@ def post_weather_data_response( # noqa: C901 datetime=event_start, value=event_value, horizon=belief_horizon, - sensor_id=weather_sensor.id, + sensor_id=sensor.id, data_source_id=data_source.id, ) for event_start, event_value, belief_horizon in zip( @@ -206,7 +207,7 @@ def post_weather_data_response( # noqa: C901 forecasting_jobs.extend( create_forecasting_jobs( "Weather", - weather_sensor.id, + sensor.id, start, start + duration, resolution=duration / len(event_values), @@ -302,10 +303,10 @@ def post_power_data( current_app.logger.info("POSTING POWER DATA") data_source = get_or_create_source(current_user) - user_assets = get_assets() - if not user_assets: + user_sensors = get_sensors() + if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") - user_asset_ids = [asset.id for asset in user_assets] + user_sensor_ids = [sensor.id for sensor in user_sensors] power_measurements = [] forecasting_jobs = [] for connection_group, event_values in zip(generic_asset_name_groups, value_groups): @@ -317,32 +318,36 @@ def post_power_data( ea = parse_entity_address(connection, entity_type="connection") except EntityAddressException as eae: return invalid_domain(str(eae)) - asset_id = ea["sensor_id"] + sensor_id = ea["sensor_id"] - # Look for the Asset object - if asset_id in user_asset_ids: - asset = Asset.query.filter(Asset.id == asset_id).one_or_none() + # Look for the Sensor object + if sensor_id in user_sensor_ids: + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() else: current_app.logger.warning("Cannot identify connection %s" % 
connection) return unrecognized_connection_group() # Validate the sign of the values (following USEF specs with positive consumption and negative production) - if asset.is_pure_consumer and any(v < 0 for v in event_values): + if sensor.get_attribute("is_pure_consumer") and any( + v < 0 for v in event_values + ): extra_info = ( "Connection %s is registered as a pure consumer and can only receive non-negative values." - % asset.entity_address + % sensor.entity_address ) return power_value_too_small(extra_info) - elif asset.is_pure_producer and any(v > 0 for v in event_values): + elif sensor.get_attribute("is_pure_producer") and any( + v > 0 for v in event_values + ): extra_info = ( "Connection %s is registered as a pure producer and can only receive non-positive values." - % asset.entity_address + % sensor.entity_address ) return power_value_too_big(extra_info) # Convert to timely-beliefs terminology event_starts, belief_horizons = determine_belief_timing( - event_values, start, resolution, horizon, prior, asset + event_values, start, resolution, horizon, prior, sensor ) # Create new Power objects @@ -353,7 +358,7 @@ def post_power_data( value=event_value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption horizon=belief_horizon, - asset_id=asset.id, + asset_id=sensor_id, data_source_id=data_source.id, ) for event_start, event_value, belief_horizon in zip( @@ -366,7 +371,7 @@ def post_power_data( forecasting_jobs.extend( create_forecasting_jobs( "Power", - asset_id, + sensor_id, start, start + duration, resolution=duration / len(event_values), diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index ed7ab3f13..67e3c0ee1 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -230,7 +230,7 @@ def create_test_markets(db) -> Dict[str, Market]: db.session.add(day_ahead) epex_da = Market( name="epex_da", - market_type=day_ahead, + market_type_name="day_ahead", event_resolution=timedelta(hours=1), unit="EUR/MWh", knowledge_horizon_fnc="x_days_ago_at_y_oclock", @@ -310,6 +310,7 @@ def setup_assets( for asset_name in ["wind-asset-1", "wind-asset-2", "solar-asset-1"]: asset = Asset( name=asset_name, + owner_id=setup_roles_users["Test Prosumer User"].id, asset_type_name="wind" if "wind" in asset_name else "solar", event_resolution=timedelta(minutes=15), capacity_in_mw=1, @@ -321,7 +322,6 @@ def setup_assets( unit="MW", market_id=setup_markets["epex_da"].id, ) - asset.owner = setup_roles_users["Test Prosumer User"] db.session.add(asset) assets.append(asset) @@ -454,6 +454,7 @@ def create_test_battery_assets( test_battery = Asset( name="Test battery", + owner_id=setup_roles_users["Test Prosumer User"].id, asset_type_name="battery", event_resolution=timedelta(minutes=15), capacity_in_mw=2, @@ -467,11 +468,11 @@ def create_test_battery_assets( market_id=setup_markets["epex_da"].id, unit="MW", ) - test_battery.owner = setup_roles_users["Test Prosumer User"] db.session.add(test_battery) test_battery_no_prices = Asset( name="Test battery with no known prices", + owner_id=setup_roles_users["Test Prosumer User"].id, asset_type_name="battery", event_resolution=timedelta(minutes=15), capacity_in_mw=2, @@ -485,7 +486,6 @@ def create_test_battery_assets( market_id=setup_markets["epex_da"].id, unit="MW", ) - test_battery_no_prices.owner = setup_roles_users["Test Prosumer User"] db.session.add(test_battery_no_prices) return { "Test battery": test_battery, @@ -525,6 +525,7 @@ def add_charging_station_assets( charging_station = Asset( name="Test 
charging station", + owner_id=setup_roles_users["Test Prosumer User"].id, asset_type_name="one-way_evse", event_resolution=timedelta(minutes=15), capacity_in_mw=2, @@ -538,11 +539,11 @@ def add_charging_station_assets( market_id=setup_markets["epex_da"].id, unit="MW", ) - charging_station.owner = setup_roles_users["Test Prosumer User"] db.session.add(charging_station) bidirectional_charging_station = Asset( name="Test charging station (bidirectional)", + owner_id=setup_roles_users["Test Prosumer User"].id, asset_type_name="two-way_evse", event_resolution=timedelta(minutes=15), capacity_in_mw=2, @@ -556,7 +557,6 @@ def add_charging_station_assets( market_id=setup_markets["epex_da"].id, unit="MW", ) - bidirectional_charging_station.owner = setup_roles_users["Test Prosumer User"] db.session.add(bidirectional_charging_station) return { "Test charging station": charging_station, diff --git a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py index d8128ff3e..b849d2b12 100644 --- a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py +++ b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py @@ -20,21 +20,23 @@ def upgrade(): - op.add_column( - "generic_asset", sa.Column("attributes", sa.JSON(), nullable=True, default="{}") - ) - op.add_column( - "sensor", sa.Column("attributes", sa.JSON(), nullable=True, default="{}") - ) - """ + Add attributes column to GenericAsset and Sensor tables. Then: - For each OldModel (Market/WeatherSensor/Asset), get the Sensor with the same id as the OldModel, and then get the GenericAsset of that Sensor. - - Add the OldModel's display name to the corresponding GenericAsset's attributes, - and other attributes we want to copy. + - Add the OldModel's display name to the corresponding GenericAsset's and Sensor's attributes, + and other attributes we want to copy. Most go to the Sensor. - Find the OldModelType (MarketType/WeatherSensorType/AssetType) of the OldModel, - and copy its seasonalities to the GenericAsset's attributes. + and copy its seasonalities and other attributes to the GenericAsset's or Sensor's attributes. """ + op.add_column( + "generic_asset", + sa.Column("attributes", sa.JSON(), nullable=True, default={}), + ) + op.add_column( + "sensor", + sa.Column("attributes", sa.JSON(), nullable=True, default={}), + ) # todo: find places where we look for seasonality and get it from the corresponding GenericAsset instead # todo: find places where we look for old_model_type and get it from the corresponding GenericAsset instead diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 2cbd9eaac..0815b2039 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -6,6 +6,10 @@ from sqlalchemy.orm import Query from flexmeasures.data.config import db +from flexmeasures.data.models.legacy_migration_utils import ( + copy_old_sensor_attributes, + get_old_model_type, +) from flexmeasures.data.models.user import User from flexmeasures.data.models.time_series import Sensor, TimedValue from flexmeasures.data.models.generic_assets import ( @@ -119,90 +123,58 @@ def __init__(self, **kwargs): # Also keep track of ownership by creating a GenericAsset and assigning the new Sensor to it. 
if "id" not in kwargs: + asset_type = get_old_model_type( + kwargs, AssetType, "asset_type_name", "asset_type" + ) + # Set up generic asset - generic_assets_arg = kwargs.copy() - if "asset_type_name" in generic_assets_arg: - asset_type = db.session.query(AssetType).get( - generic_assets_arg["asset_type_name"] - ) - else: - asset_type = generic_assets_arg["asset_type"] - asset_type_attributes_for_generic_asset = [ - "can_curtail", - "can_shift", - ] - asset_attributes_for_generic_asset = [ - "display_name", - "min_soc_in_mwh", - "max_soc_in_mwh", - "soc_in_mwh", - "soc_datetime", - "soc_udi_event_id", - ] - generic_asset_attributes_from_asset_type = { - a: getattr(asset_type, a) - for a in asset_type_attributes_for_generic_asset - } - generic_asset_attributes_from_asset = { - a: getattr(self, a) - if not isinstance(getattr(self, a), datetime) - else getattr(self, a).isoformat() - for a in asset_attributes_for_generic_asset - } - generic_assets_arg = { - **generic_assets_arg, - **{ - "attributes": { - **generic_asset_attributes_from_asset_type, - **generic_asset_attributes_from_asset, - }, - }, + generic_asset_kwargs = { + **kwargs, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[ + "can_curtail", + "can_shift", + ], + old_sensor_attributes=[ + "display_name", + "min_soc_in_mwh", + "max_soc_in_mwh", + "soc_in_mwh", + "soc_datetime", + "soc_udi_event_id", + ], + old_sensor_type=asset_type, + ), } if "owner_id" in kwargs: owner = User.query.get(kwargs["owner_id"]) if owner: - generic_assets_arg.update(account_id=owner.account_id) - new_generic_asset = create_generic_asset("asset", **generic_assets_arg) + generic_asset_kwargs.update(account_id=owner.account_id) + new_generic_asset = create_generic_asset("asset", **generic_asset_kwargs) # Set up sensor - sensor_kwargs = dict( + new_sensor = Sensor( name=kwargs["name"], generic_asset=new_generic_asset, - ) - asset_type_attributes_for_sensor = [ - "is_consumer", - "is_producer", - "daily_seasonality", - "weekly_seasonality", - "yearly_seasonality", - "weather_correlations", - ] - asset_attributes_for_sensor = [ - "display_name", - "capacity_in_mw", - "market_id", - ] - sensor_attributes_from_asset_type = { - a: getattr(asset_type, a) for a in asset_type_attributes_for_sensor - } - sensor_attributes_from_asset = { - a: getattr(self, a) - if not isinstance(getattr(self, a), datetime) - else getattr(self, a).isoformat() - for a in asset_attributes_for_sensor - } - sensor_kwargs = { - **sensor_kwargs, - **{ - "attributes": { - **sensor_attributes_from_asset_type, - **sensor_attributes_from_asset, - }, - }, - } - new_sensor = Sensor( - **sensor_kwargs, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[ + "is_consumer", + "is_producer", + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + "weather_correlations", + ], + old_sensor_attributes=[ + "display_name", + "capacity_in_mw", + "market_id", + ], + old_sensor_type=asset_type, + ), ) db.session.add(new_sensor) db.session.flush() # generates the pkey for new_sensor diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index 585f00820..4c5b57516 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -1,5 +1,7 @@ from typing import Optional, Tuple +from sqlalchemy.ext.mutable import MutableDict + from flexmeasures.data import db @@ -25,7 +27,7 @@ class GenericAsset(db.Model): name = db.Column(db.String(80), default="") latitude = 
db.Column(db.Float, nullable=True) longitude = db.Column(db.Float, nullable=True) - attributes = db.Column(db.JSON, nullable=False, default="{}") + attributes = db.Column(MutableDict.as_mutable(db.JSON), nullable=False, default={}) generic_asset_type_id = db.Column( db.Integer, db.ForeignKey("generic_asset_type.id"), nullable=False @@ -61,6 +63,13 @@ def get_attribute(self, attribute: str): if attribute in self.attributes: return self.attributes[attribute] + def has_attribute(self, attribute: str) -> bool: + return attribute in self.attributes + + def set_attribute(self, attribute: str, value): + if self.has_attribute(attribute): + self.attributes[attribute] = value + def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset: """Create a GenericAsset and assigns it an id. diff --git a/flexmeasures/data/models/legacy_migration_utils.py b/flexmeasures/data/models/legacy_migration_utils.py new file mode 100644 index 000000000..f2360a427 --- /dev/null +++ b/flexmeasures/data/models/legacy_migration_utils.py @@ -0,0 +1,62 @@ +""" +This module is part of our data model migration (see https://github.com/SeitaBV/flexmeasures/projects/9). +It will become obsolete when Assets, Markets and WeatherSensors can no longer be initialized. +""" + +from datetime import datetime +from typing import List + +from flexmeasures.data import db + + +def copy_old_sensor_attributes( + old_sensor, + old_sensor_type_attributes: List[str], + old_sensor_attributes: List[str], + old_sensor_type: "Union[AssetType, MarketType, WeatherSensorType]" = None, # noqa F821 +) -> dict: + """ + :param old_sensor: an Asset, Market or WeatherSensor instance + :param old_sensor_type_attributes: names of attributes of the old sensor's type that should be copied + :param old_sensor_attributes: names of attributes of the old sensor that should be copied + :param old_sensor_type: the old sensor's type + :returns: dictionary containing an "attributes" dictionary with attribute names and values + """ + new_model_attributes_from_old_sensor_type = { + a: getattr(old_sensor_type, a) for a in old_sensor_type_attributes + } + new_model_attributes_from_old_sensor = { + a: getattr(old_sensor, a) + if not isinstance(getattr(old_sensor, a), datetime) + else getattr(old_sensor, a).isoformat() + for a in old_sensor_attributes + } + return dict( + attributes={ + **new_model_attributes_from_old_sensor_type, + **new_model_attributes_from_old_sensor, + } + ) + + +def get_old_model_type( + kwargs: dict, + old_sensor_type_class: "Type[Union[AssetType, MarketType, WeatherSensorType]]", # noqa F821 + old_sensor_type_name_key: str, + old_sensor_type_key: str, +) -> "Union[AssetType, MarketType, WeatherSensorType]": # noqa F821 + """ + :param kwargs: keyword arguments used to initialize a new Asset, Market or WeatherSensor + :param old_sensor_type_class: AssetType, MarketType, or WeatherSensorType + :param old_sensor_type_name_key: "asset_type_name", "market_type_name", or "weather_sensor_type_name" + :param old_sensor_type_key: "asset_type", "market_type", or "sensor_type" (instead of "weather_sensor_type"), + i.e. 
the name of the class attribute for the db.relationship to the type's class + :returns: the old sensor's type + """ + if old_sensor_type_name_key in kwargs: + old_sensor_type = db.session.query(old_sensor_type_class).get( + kwargs[old_sensor_type_name_key] + ) + else: + old_sensor_type = kwargs[old_sensor_type_key] + return old_sensor_type diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index d74a03ea6..c89c26be7 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -10,6 +10,10 @@ GenericAsset, GenericAssetType, ) +from flexmeasures.data.models.legacy_migration_utils import ( + copy_old_sensor_attributes, + get_old_model_type, +) from flexmeasures.data.models.time_series import Sensor, TimedValue from flexmeasures.utils.entity_address_utils import build_entity_address from flexmeasures.utils.flexmeasures_inflection import humanize @@ -28,14 +32,16 @@ class MarketType(db.Model): yearly_seasonality = db.Column(db.Boolean(), nullable=False, default=False) def __init__(self, **kwargs): + kwargs["name"] = kwargs["name"].replace(" ", "_").lower() + if "display_name" not in kwargs: + kwargs["display_name"] = humanize(kwargs["name"]) + + super(MarketType, self).__init__(**kwargs) + generic_asset_type = GenericAssetType( name=kwargs["name"], description=kwargs.get("hover_label", None) ) db.session.add(generic_asset_type) - super(MarketType, self).__init__(**kwargs) - self.name = self.name.replace(" ", "_").lower() - if "display_name" not in kwargs: - self.display_name = humanize(self.name) @property def preconditions(self) -> Dict[str, bool]: @@ -71,11 +77,47 @@ def __init__(self, **kwargs): kwargs["knowledge_horizon_par"] = { knowledge_horizons.ex_ante.__code__.co_varnames[1]: "PT0H" } + kwargs["name"] = kwargs["name"].replace(" ", "_").lower() + if "display_name" not in kwargs: + kwargs["display_name"] = humanize(kwargs["name"]) + + super(Market, self).__init__(**kwargs) # Create a new Sensor with unique id across assets, markets and weather sensors if "id" not in kwargs: - new_generic_asset = create_generic_asset("market", **kwargs) - new_sensor = Sensor(name=kwargs["name"], generic_asset=new_generic_asset) + + market_type = get_old_model_type( + kwargs, MarketType, "market_type_name", "market_type" + ) + + generic_asset_kwargs = { + **kwargs, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[], + old_sensor_attributes=[ + "display_name", + ], + old_sensor_type=market_type, + ), + } + new_generic_asset = create_generic_asset("market", **generic_asset_kwargs) + new_sensor = Sensor( + name=kwargs["name"], + generic_asset=new_generic_asset, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[ + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + ], + old_sensor_attributes=[ + "display_name", + ], + old_sensor_type=market_type, + ), + ) db.session.add(new_sensor) db.session.flush() # generates the pkey for new_sensor new_sensor_id = new_sensor.id @@ -83,11 +125,16 @@ def __init__(self, **kwargs): # The UI may initialize Market objects from API form data with a known id new_sensor_id = kwargs["id"] - super(Market, self).__init__(**kwargs) self.id = new_sensor_id - self.name = self.name.replace(" ", "_").lower() - if "display_name" not in kwargs: - self.display_name = humanize(self.name) + + # Copy over additional columns from (newly created) Market to (newly created) Sensor + if "id" not in kwargs: + db.session.add(self) + db.session.flush() # make sure to generate 
each column for the old sensor + new_sensor.unit = self.unit + new_sensor.event_resolution = self.event_resolution + new_sensor.knowledge_horizon_fnc = self.knowledge_horizon_fnc + new_sensor.knowledge_horizon_par = self.knowledge_horizon_par @property def entity_address_fm0(self) -> str: diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index cce8cb86e..bd0d9d5d8 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -4,6 +4,7 @@ from flask import current_app from sqlalchemy.ext.declarative import declared_attr +from sqlalchemy.ext.mutable import MutableDict from sqlalchemy.orm import Query, Session import timely_beliefs as tb import timely_beliefs.utils as tb_utils @@ -28,7 +29,7 @@ class Sensor(db.Model, tb.SensorDBMixin): """A sensor measures events. """ - attributes = db.Column(db.JSON, nullable=False, default="{}") + attributes = db.Column(MutableDict.as_mutable(db.JSON), nullable=False, default={}) generic_asset_id = db.Column( db.Integer, @@ -91,6 +92,13 @@ def get_attribute(self, attribute: str): elif attribute in self.generic_asset.attributes: return self.generic_asset.attributes[attribute] + def has_attribute(self, attribute: str) -> bool: + return attribute in self.attributes + + def set_attribute(self, attribute: str, value): + if self.has_attribute(attribute): + self.attributes[attribute] = value + def latest_state( self, source: Optional[ diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index bad08fab1..9354b6517 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -8,6 +8,10 @@ from sqlalchemy.schema import UniqueConstraint from flexmeasures.data.config import db +from flexmeasures.data.models.legacy_migration_utils import ( + copy_old_sensor_attributes, + get_old_model_type, +) from flexmeasures.data.models.time_series import Sensor, TimedValue from flexmeasures.data.models.generic_assets import ( create_generic_asset, @@ -73,11 +77,50 @@ class WeatherSensor(db.Model, tb.SensorDBMixin): ) def __init__(self, **kwargs): + kwargs["name"] = kwargs["name"].replace(" ", "_").lower() + + super(WeatherSensor, self).__init__(**kwargs) # Create a new Sensor with unique id across assets, markets and weather sensors if "id" not in kwargs: - new_generic_asset = create_generic_asset("weather_sensor", **kwargs) - new_sensor = Sensor(name=kwargs["name"], generic_asset=new_generic_asset) + + weather_sensor_type = get_old_model_type( + kwargs, + WeatherSensorType, + "weather_sensor_type_name", + "sensor_type", # NB not "weather_sensor_type" (slight inconsistency in this old sensor class) + ) + + generic_asset_kwargs = { + **kwargs, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[], + old_sensor_attributes=[ + "display_name", + ], + old_sensor_type=weather_sensor_type, + ), + } + new_generic_asset = create_generic_asset( + "weather_sensor", **generic_asset_kwargs + ) + new_sensor = Sensor( + name=kwargs["name"], + generic_asset=new_generic_asset, + **copy_old_sensor_attributes( + self, + old_sensor_type_attributes=[ + "daily_seasonality", + "weekly_seasonality", + "yearly_seasonality", + ], + old_sensor_attributes=[ + "display_name", + ], + old_sensor_type=weather_sensor_type, + ), + ) db.session.add(new_sensor) db.session.flush() # generates the pkey for new_sensor new_sensor_id = new_sensor.id @@ -85,9 +128,16 @@ def __init__(self, **kwargs): # The UI may initialize WeatherSensor objects from API 
form data with a known id new_sensor_id = kwargs["id"] - super(WeatherSensor, self).__init__(**kwargs) self.id = new_sensor_id - self.name = self.name.replace(" ", "_").lower() + + # Copy over additional columns from (newly created) WeatherSensor to (newly created) Sensor + if "id" not in kwargs: + db.session.add(self) + db.session.flush() # make sure to generate each column for the old sensor + new_sensor.unit = self.unit + new_sensor.event_resolution = self.event_resolution + new_sensor.knowledge_horizon_fnc = self.knowledge_horizon_fnc + new_sensor.knowledge_horizon_par = self.knowledge_horizon_par @property def entity_address_fm0(self) -> str: diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index c73490a37..d5c6662af 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -24,6 +24,7 @@ assets_share_location, ) from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.weather import Weather, WeatherSensor, WeatherSensorType from flexmeasures.data.models.user import User from flexmeasures.data.queries.utils import simplify_index @@ -53,6 +54,21 @@ def get_assets( return _build_asset_query(owner_id, order_by_asset_attribute, order_direction).all() +def get_sensors( + owner_id: Optional[int] = None, + order_by_asset_attribute: str = "id", + order_direction: str = "desc", +) -> List[Sensor]: + """Return a list of all Sensor objects owned by current_user's organisation account + (or all users or a specific user - for this, admins can set an owner_id). + """ + # todo: switch to using authz from https://github.com/SeitaBV/flexmeasures/pull/234 + return [ + asset.corresponding_sensor + for asset in get_assets(owner_id, order_by_asset_attribute, order_direction) + ] + + def has_assets(owner_id: Optional[int] = None) -> bool: """Return True if the current user owns any assets. (or all users or a specific user - for this, admins can set an owner_id). """ return _build_asset_query(owner_id).count() > 0 -def can_access_asset(asset: Asset) -> bool: - """Return True if the current user is an admin or the owner of the asset:""" +def can_access_asset(asset_or_sensor: Union[Asset, Sensor]) -> bool: + """Return True if: + - the current user is an admin, or + - the current user is the owner of the asset, or + - the current user's organisation account owns the corresponding generic asset, or + - the corresponding generic asset is public + + todo: refactor to `def can_access_sensor(sensor: Sensor) -> bool` once `ui.views.state.state_view` stops calling it with an Asset + todo: let this function use our new auth model (row-level authorization) + todo: deprecate this function in favor of an authz decorator on the API route + """ if current_user.is_authenticated: if current_user.has_role(ADMIN_ROLE): return True - if asset.owner == current_user: + if isinstance(asset_or_sensor, Sensor): + if asset_or_sensor.generic_asset.owner in (None, current_user.account): + return True + elif asset_or_sensor.owner == current_user: return True return False @@ -320,7 +348,7 @@ class Resource: asset_name_to_market_name_map: Dict[str, str] def __init__(self, name: str): - """ The resource name is either the name of an asset group or an individual asset. 
""" + """The resource name is either the name of an asset group or an individual asset.""" if name is None or name == "": raise Exception("Empty resource name passed (%s)" % name) self.name = name @@ -478,12 +506,12 @@ def price_data(self) -> Dict[str, tb.BeliefsDataFrame]: @cached_property def demand(self) -> Dict[str, tb.BeliefsDataFrame]: - """ Returns each asset's demand as positive values. """ + """Returns each asset's demand as positive values.""" return {k: get_demand_from_bdf(v) for k, v in self.power_data.items()} @cached_property def supply(self) -> Dict[str, tb.BeliefsDataFrame]: - """ Returns each asset's supply as positive values. """ + """Returns each asset's supply as positive values.""" return {k: get_supply_from_bdf(v) for k, v in self.power_data.items()} @cached_property @@ -492,17 +520,17 @@ def aggregate_power_data(self) -> tb.BeliefsDataFrame: @cached_property def aggregate_demand(self) -> tb.BeliefsDataFrame: - """ Returns aggregate demand as positive values. """ + """Returns aggregate demand as positive values.""" return get_demand_from_bdf(self.aggregate_power_data) @cached_property def aggregate_supply(self) -> tb.BeliefsDataFrame: - """ Returns aggregate supply (as positive values). """ + """Returns aggregate supply (as positive values).""" return get_supply_from_bdf(self.aggregate_power_data) @cached_property def total_demand(self) -> Dict[str, float]: - """ Returns each asset's total demand as a positive value. """ + """Returns each asset's total demand as a positive value.""" return { k: v.sum().values[0] * time_utils.resolution_to_hour_factor(v.event_resolution) @@ -511,7 +539,7 @@ def total_demand(self) -> Dict[str, float]: @cached_property def total_supply(self) -> Dict[str, float]: - """ Returns each asset's total supply as a positive value. """ + """Returns each asset's total supply as a positive value.""" return { k: v.sum().values[0] * time_utils.resolution_to_hour_factor(v.event_resolution) @@ -520,21 +548,21 @@ def total_supply(self) -> Dict[str, float]: @cached_property def total_aggregate_demand(self) -> float: - """ Returns total aggregate demand as a positive value. """ + """Returns total aggregate demand as a positive value.""" return self.aggregate_demand.sum().values[ 0 ] * time_utils.resolution_to_hour_factor(self.aggregate_demand.event_resolution) @cached_property def total_aggregate_supply(self) -> float: - """ Returns total aggregate supply as a positive value. """ + """Returns total aggregate supply as a positive value.""" return self.aggregate_supply.sum().values[ 0 ] * time_utils.resolution_to_hour_factor(self.aggregate_supply.event_resolution) @cached_property def revenue(self) -> Dict[str, float]: - """ Returns each asset's total revenue from supply. """ + """Returns each asset's total revenue from supply.""" revenue_dict = {} for k, v in self.supply.items(): market_name = self.asset_name_to_market_name_map[k] @@ -550,12 +578,12 @@ def revenue(self) -> Dict[str, float]: @cached_property def aggregate_revenue(self) -> float: - """ Returns total aggregate revenue from supply. """ + """Returns total aggregate revenue from supply.""" return sum(self.revenue.values()) @cached_property def cost(self) -> Dict[str, float]: - """ Returns each asset's total cost from demand. 
""" + """Returns each asset's total cost from demand.""" cost_dict = {} for k, v in self.demand.items(): market_name = self.asset_name_to_market_name_map[k] @@ -571,12 +599,12 @@ def cost(self) -> Dict[str, float]: @cached_property def aggregate_cost(self) -> float: - """ Returns total aggregate cost from demand. """ + """Returns total aggregate cost from demand.""" return sum(self.cost.values()) @cached_property def aggregate_profit_or_loss(self) -> float: - """ Returns total aggregate profit (loss is negative). """ + """Returns total aggregate profit (loss is negative).""" return self.aggregate_revenue - self.aggregate_cost def clear_cache(self): @@ -593,14 +621,14 @@ def __str__(self): def get_demand_from_bdf( bdf: Union[pd.DataFrame, tb.BeliefsDataFrame] ) -> Union[pd.DataFrame, tb.BeliefsDataFrame]: - """ Positive values become 0 and negative values become positive values. """ + """Positive values become 0 and negative values become positive values.""" return bdf.clip(upper=0).abs() def get_supply_from_bdf( bdf: Union[pd.DataFrame, tb.BeliefsDataFrame] ) -> Union[pd.DataFrame, tb.BeliefsDataFrame]: - """ Negative values become 0. """ + """Negative values become 0.""" return bdf.clip(lower=0) diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 8e19705fc..bd47ab748 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -233,12 +233,8 @@ def test_query_beliefs(setup_beliefs): tb.BeliefsDataFrame(sensor.beliefs), # doesn't allow filtering ] for bdf in bdfs: - assert sensor.event_resolution == timedelta( - hours=0 - ) # todo change to 1 after migrating Markets to Sensors - assert bdf.event_resolution == timedelta( - hours=0 - ) # todo change to 1 after migrating Markets to Sensors + assert sensor.event_resolution == timedelta(hours=1) + assert bdf.event_resolution == timedelta(hours=1) assert len(bdf) == setup_beliefs diff --git a/flexmeasures/data/tests/test_time_series_services.py b/flexmeasures/data/tests/test_time_series_services.py index ca233434f..32a36f144 100644 --- a/flexmeasures/data/tests/test_time_series_services.py +++ b/flexmeasures/data/tests/test_time_series_services.py @@ -79,7 +79,7 @@ def test_do_not_drop_changed_probabilistic_belief(setup_beliefs): ) & ( bdf.index.get_level_values("belief_time") - == pd.Timestamp("2021-03-28 14:00:00+00:00") + == pd.Timestamp("2021-03-27 9:00:00+00:00") ) ] new_belief = tb_utils.replace_multi_index_level( diff --git a/flexmeasures/utils/entity_address_utils.py b/flexmeasures/utils/entity_address_utils.py index 3f28abed4..33633c462 100644 --- a/flexmeasures/utils/entity_address_utils.py +++ b/flexmeasures/utils/entity_address_utils.py @@ -105,7 +105,7 @@ def parse_entity_address( # noqa: C901 fm_scheme: str = FM1_ADDR_SCHEME, ) -> dict: """ - Parses a generic asset name into an info dict. + Parses an entity address into an info dict. Returns a dictionary with scheme, naming_authority and various other fields, depending on the entity type and FlexMeasures scheme (see examples above). From 9cb39b9b3dbab660fcaf49c66b3a81f1b01f4550 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 3 Dec 2021 16:17:23 +0100 Subject: [PATCH 03/46] Issue 245 planners should check for required and optional sensor attributes more explicitly (#256) Code can now check for required Sensor attributes of an expected type, by calling `sensor.check_required_attributes`. 
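For example, a planner can declare its data needs up front and get a descriptive exception when they are not met. A minimal sketch (the sensor id and the chosen attribute names are illustrative, not taken verbatim from this patch):

    from flexmeasures.data.models.time_series import Sensor
    from flexmeasures.data.models.validation_utils import (
        MissingAttributeException,
        WrongTypeAttributeException,
    )

    sensor = Sensor.query.get(1)  # assumes an app context and an existing sensor
    try:
        # Each entry is either just a name, or a (name, allowed type(s)) tuple
        sensor.check_required_attributes(
            [
                ("capacity_in_mw", (float, int)),  # must exist and be numeric
                "market_id",  # must merely exist; any type is accepted
            ]
        )
    except MissingAttributeException:
        ...  # e.g. refuse to schedule; the message lists all missing attribute names
    except WrongTypeAttributeException:
        ...  # e.g. report each offending attribute and its expected type
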
* Allow passing a default attribute * Set default for optional attributes * Check for required Sensor attributes * Fix tests * Allow checking for attribute type, too * Allow checking for one of multiple allowed types * Check required attribute types * Move util function to become Sensor attribute * Move util function to separate module to resolve circular import --- flexmeasures/data/models/planning/battery.py | 9 +++ .../data/models/planning/charging_station.py | 7 ++- .../data/models/planning/exceptions.py | 8 +++ flexmeasures/data/models/time_series.py | 22 ++++++- flexmeasures/data/models/validation_utils.py | 59 +++++++++++++++++++ 5 files changed, 100 insertions(+), 5 deletions(-) create mode 100644 flexmeasures/data/models/validation_utils.py diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index bc725c138..265f90058 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -27,6 +27,15 @@ def schedule_battery( For the resulting consumption schedule, consumption is defined as positive values. """ + # Check for required Sensor attributes + sensor.check_required_attributes( + [ + ("capacity_in_mw", (float, int)), + ("max_soc_in_mwh", (float, int)), + ("min_soc_in_mwh", (float, int)), + ], + ) + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, allow_trimmed_query_window=True diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 6677c8e98..93b2f3c92 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -28,6 +28,9 @@ def schedule_charging_station( Todo: handle uni-directional charging by setting the "min" or "derivative min" constraint to 0 """ + # Check for required Sensor attributes + sensor.check_required_attributes([("capacity_in_mw", (float, int))]) + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, allow_trimmed_query_window=True @@ -79,13 +82,13 @@ def schedule_charging_station( ) - soc_at_start * ( timedelta(hours=1) / resolution ) # Lacking information about the battery's nominal capacity, we use the highest target value as the maximum state of charge - if sensor.get_attribute("is_pure_consumer"): + if sensor.get_attribute("is_pure_consumer", False): device_constraints[0]["derivative min"] = 0 else: device_constraints[0]["derivative min"] = ( sensor.get_attribute("capacity_in_mw") * -1 ) - if sensor.get_attribute("is_pure_producer"): + if sensor.get_attribute("is_pure_producer", False): device_constraints[0]["derivative max"] = 0 else: device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") diff --git a/flexmeasures/data/models/planning/exceptions.py b/flexmeasures/data/models/planning/exceptions.py index 3337120b1..50b404a20 100644 --- a/flexmeasures/data/models/planning/exceptions.py +++ b/flexmeasures/data/models/planning/exceptions.py @@ -1,6 +1,14 @@ +class MissingAttributeException(Exception): + pass + + class UnknownMarketException(Exception): pass class UnknownPricesException(Exception): pass + + +class WrongTypeAttributeException(Exception): + pass diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index bd0d9d5d8..c32ce0ec7 100644 --- 
a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Optional, Union, Tuple +from typing import Any, List, Dict, Optional, Union, Type, Tuple from datetime import datetime as datetime_type, timedelta import json @@ -22,6 +22,7 @@ from flexmeasures.data.models.charts import chart_type_to_chart_specs from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.generic_assets import GenericAsset +from flexmeasures.data.models.validation_utils import check_required_attributes from flexmeasures.utils.time_utils import server_now from flexmeasures.utils.flexmeasures_inflection import capitalize @@ -83,22 +84,37 @@ def location(self) -> Optional[Tuple[float, float]]: return self.latitude, self.longitude return None - def get_attribute(self, attribute: str): + def get_attribute(self, attribute: str, default: Any = None) -> Any: """Looks for the attribute on the Sensor. If not found, looks for the attribute on the Sensor's GenericAsset. + If not found, returns the default. """ if attribute in self.attributes: return self.attributes[attribute] elif attribute in self.generic_asset.attributes: return self.generic_asset.attributes[attribute] + return default def has_attribute(self, attribute: str) -> bool: - return attribute in self.attributes + return ( + attribute in self.attributes or attribute in self.generic_asset.attributes + ) def set_attribute(self, attribute: str, value): if self.has_attribute(attribute): self.attributes[attribute] = value + def check_required_attributes( + self, + attributes: List[Union[str, Tuple[str, Union[Type, Tuple[Type, ...]]]]], + ): + """Raises if any attribute in the list of attributes is missing, or has the wrong type. + + :param attributes: List of either an attribute name or a tuple of an attribute name and its allowed type + (the allowed type may also be a tuple of several allowed types) + """ + check_required_attributes(self, attributes) + def latest_state( self, source: Optional[ diff --git a/flexmeasures/data/models/validation_utils.py b/flexmeasures/data/models/validation_utils.py new file mode 100644 index 000000000..5dc6cf9c7 --- /dev/null +++ b/flexmeasures/data/models/validation_utils.py @@ -0,0 +1,59 @@ +from typing import List, Union, Tuple, Type + + +class MissingAttributeException(Exception): + pass + + +class WrongTypeAttributeException(Exception): + pass + + +def check_required_attributes( + sensor: "Sensor", # noqa: F821 + attributes: List[Union[str, Tuple[str, Union[Type, Tuple[Type, ...]]]]], +): + """Raises if any attribute in the list of attributes is missing on the Sensor, or has the wrong type. 
+ + :param sensor: Sensor object to check for attributes + :param attributes: List of either an attribute name or a tuple of an attribute name and its allowed type + (the allowed type may also be a tuple of several allowed types) + """ + missing_attributes: List[str] = [] + wrong_type_attributes: List[Tuple[str, Type, Type]] = [] + for attribute_field in attributes: + if isinstance(attribute_field, str): + attribute_name = attribute_field + expected_attribute_type = None + elif isinstance(attribute_field, tuple) and len(attribute_field) == 2: + attribute_name = attribute_field[0] + expected_attribute_type = attribute_field[1] + else: + raise ValueError("Unexpected declaration of attributes") + + # Check attribute exists + if not sensor.has_attribute(attribute_name): + missing_attributes.append(attribute_name) + + # Check attribute is of the expected type + if expected_attribute_type is not None: + attribute = sensor.get_attribute(attribute_name) + if not isinstance(attribute, expected_attribute_type): + wrong_type_attributes.append( + (attribute_name, type(attribute), expected_attribute_type) + ) + if missing_attributes: + raise MissingAttributeException( + f"Sensor is missing required attributes: {', '.join(missing_attributes)}" + ) + if wrong_type_attributes: + error_message = "" + for ( + attribute_name, + attribute_type, + expected_attribute_type, + ) in wrong_type_attributes: + error_message += f"- attribute '{attribute_name}' is a {attribute_type} instead of a {expected_attribute_type}\n" + raise WrongTypeAttributeException( + f"Sensor attributes are not of the required type:\n {error_message}" + ) From 2c629887990d81a2ff3b03e80ed5752f49328846 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 3 Dec 2021 16:50:44 +0100 Subject: [PATCH 04/46] Issues 252 foreign keys to sensor table on old sensor data tables (#255) Connect old sensor data tables to the Sensor table, with a database migration that replaces Power.asset_id with Power.sensor_id, Price.market_id with Price.sensor_id, and Weather.(weather_)sensor_id with Weather.sensor_id. It also simplifies the table joins in verify_power_in_db and verify_prices_in_db, moves over API conftests that create Power, Price and Weather instances, and refactors the use of services.forecasting to get rid of some legacy util functions. 
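To illustrate the simplified joins, old-style time series rows can now be queried via the unified Sensor table directly. A minimal sketch modelled on the updated verify_power_in_db test util (the sensor name is made up):

    from flexmeasures.data.models.assets import Power
    from flexmeasures.data.models.time_series import Sensor

    # Power.sensor_id now points straight at the Sensor table,
    # so no detour via the old Asset model is needed:
    power_values = (
        Power.query.join(Sensor)
        .filter(Power.sensor_id == Sensor.id)
        .filter(Sensor.name == "my-battery-sensor")
        .all()
    )
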
* Add new foreign key for Power model and start db migration * Simplify verify_power_in_db and verify_prices_in_db * Add new foreign key for Price model * Rename Weather attributes * Add new foreign key for Weather model * Bidirectional id syncing * Switch over Power, Price and Weather initialization * Reverse direction of id syncing * Unlink Power, Price and Weather from Asset, Market and WeatherSensor * Fix tests after unlinking Power, Price and Weather from Asset, market and WeatherSensor * Replace another mention of Market with Sensor in api v2_0 * Deprecate use of Market class in v2_0 implementation * Get Sensor instead of Market, for planning * Check for Sensor instead of Market, for market_id validation in AssetSchema * Analytics view gets price data via Sensor name rather than via Market name * Update db migration and revise to rename columns without touching rows * Collect time series as bdf with new sensor rather than with old sensor * Refactor to pass forecasting jobs the value type explicitly instead of a string, so we can get rid of two util functions and a couple more references to the old sensor models * Replace references to Market in test_solver.py * Fix type annotation * Fix CLI test input * Refactor query to util function * Adjust type annotation * Make sensor name optional, too, in query util * Rename variable to plural * Simplify sensor queries in CLI test, and attempt to store TimedBelief instead * Rename test CLI options * Fix test CLI option * Add todo * Temporarily revert to creating data with the old sensor data model * Hopefully clarify in-line comment * Remove unused auto generated Alembic commands --- .../api/common/utils/migration_utils.py | 23 ++-- flexmeasures/api/v1/implementations.py | 4 +- flexmeasures/api/v1/tests/conftest.py | 4 +- flexmeasures/api/v1/tests/utils.py | 8 +- flexmeasures/api/v1_1/implementations.py | 6 +- flexmeasures/api/v1_1/tests/conftest.py | 6 +- flexmeasures/api/v1_1/tests/test_api_v1_1.py | 3 +- flexmeasures/api/v1_1/tests/utils.py | 8 +- flexmeasures/api/v1_3/implementations.py | 4 +- flexmeasures/api/v1_3/tests/test_api_v1_3.py | 2 +- .../api/v2_0/implementations/sensors.py | 30 ++--- .../tests/test_api_v2_0_sensors_fresh_db.py | 3 +- flexmeasures/api/v2_0/tests/utils.py | 28 ++--- flexmeasures/cli/data_add.py | 9 +- flexmeasures/cli/testing.py | 93 +++++++------- flexmeasures/conftest.py | 6 +- ...r_relationships_for_power_price_weather.py | 90 ++++++++++++++ flexmeasures/data/models/assets.py | 18 +-- .../data/models/forecasting/__init__.py | 17 +-- .../models/forecasting/model_spec_factory.py | 6 +- flexmeasures/data/models/markets.py | 8 +- .../data/models/planning/tests/test_solver.py | 13 +- flexmeasures/data/models/planning/utils.py | 17 +-- flexmeasures/data/models/time_series.py | 7 +- flexmeasures/data/models/weather.py | 8 +- flexmeasures/data/queries/analytics.py | 7 +- flexmeasures/data/queries/sensors.py | 29 +++++ flexmeasures/data/schemas/assets.py | 6 +- flexmeasures/data/scripts/data_gen.py | 16 +-- flexmeasures/data/services/forecasting.py | 114 ++++-------------- flexmeasures/data/services/scheduling.py | 2 +- flexmeasures/data/services/time_series.py | 24 ++-- flexmeasures/data/tests/conftest.py | 4 +- .../data/tests/test_forecasting_jobs.py | 33 ++--- .../tests/test_forecasting_jobs_fresh_db.py | 23 ++-- flexmeasures/data/tests/test_queries.py | 10 +- .../data/tests/test_scheduling_jobs.py | 2 +- .../tests/test_scheduling_jobs_fresh_db.py | 2 +- flexmeasures/data/tests/test_user_services.py | 4 +- 
flexmeasures/ui/views/analytics.py | 11 +- 40 files changed, 371 insertions(+), 337 deletions(-) create mode 100644 flexmeasures/data/migrations/versions/830e72a8b218_migrate_sensor_relationships_for_power_price_weather.py create mode 100644 flexmeasures/data/queries/sensors.py diff --git a/flexmeasures/api/common/utils/migration_utils.py b/flexmeasures/api/common/utils/migration_utils.py index ad306fdd4..875991266 100644 --- a/flexmeasures/api/common/utils/migration_utils.py +++ b/flexmeasures/api/common/utils/migration_utils.py @@ -10,8 +10,10 @@ unrecognized_market, ResponseTuple, ) -from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.queries.sensors import ( + query_sensor_by_name_and_generic_asset_type_name, +) def get_sensor_by_unique_name( @@ -23,20 +25,13 @@ def get_sensor_by_unique_name( This function should be used only for sensors that correspond to the old Market class. """ # Look for the Sensor object - query = Sensor.query.filter(Sensor.name == sensor_name) - if generic_asset_type_names is not None: - query = ( - query.join(GenericAsset) - .join(GenericAssetType) - .filter(GenericAssetType.name.in_(generic_asset_type_names)) - .filter(GenericAsset.generic_asset_type_id == GenericAssetType.id) - .filter(Sensor.generic_asset_id == GenericAsset.id) - ) - sensor = query.all() - if len(sensor) == 0: + sensors = query_sensor_by_name_and_generic_asset_type_name( + sensor_name, generic_asset_type_names + ).all() + if len(sensors) == 0: return unrecognized_market(sensor_name) - elif len(sensor) > 1: + elif len(sensors) > 1: return deprecated_api_version( f"Multiple sensors were found named {sensor_name}." ) - return sensor[0] + return sensors[0] diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 251acc1c1..d0f16e0d2 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -302,7 +302,7 @@ def create_connection_and_value_groups( # noqa: C901 value=value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption horizon=h, - asset_id=sensor_id, + sensor_id=sensor_id, data_source_id=data_source.id, ) power_measurements.append(p) @@ -313,7 +313,7 @@ def create_connection_and_value_groups( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - "Power", + Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 0c5e0d3da..88e46e800 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -95,7 +95,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices + timedelta(minutes=15 * i), horizon=timedelta(0), value=(100.0 + i) * -1, - asset_id=cs_5.id, + sensor_id=cs_5.id, data_source_id=user1_data_source.id, ) p_2 = Power( @@ -103,7 +103,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices + timedelta(minutes=15 * i), horizon=timedelta(hours=0), value=(1000.0 - 10 * i) * -1, - asset_id=cs_5.id, + sensor_id=cs_5.id, data_source_id=user2_data_source.id, ) meter_data.append(p_1) diff --git a/flexmeasures/api/v1/tests/utils.py b/flexmeasures/api/v1/tests/utils.py index 560d3d0f3..10f87e529 100644 --- a/flexmeasures/api/v1/tests/utils.py +++ 
b/flexmeasures/api/v1/tests/utils.py @@ -7,7 +7,7 @@ import pandas as pd from flexmeasures.api.common.utils.validators import validate_user_sources -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.time_series import Sensor @@ -122,10 +122,8 @@ def verify_power_in_db( db.session.query(Power.datetime, Power.value, Power.data_source_id) .filter((Power.datetime > start - resolution) & (Power.datetime < end)) .filter(Power.horizon == horizon) - .join( - Asset, Sensor - ) # we still need to join Asset, because Power.asset_id is still coupled to Asset rather than Sensor; see https://github.com/SeitaBV/flexmeasures/issues/252 - .filter(Power.asset_id == Sensor.id) + .join(Sensor) + .filter(Power.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) if "source" in message: diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index 3c44b6c32..baec6e3e9 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -117,7 +117,7 @@ def post_price_data_response( datetime=dt, value=value, horizon=h, - market_id=sensor.id, + sensor_id=sensor.id, data_source_id=data_source.id, ) prices.append(p) @@ -127,7 +127,7 @@ def post_price_data_response( if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( - "Price", + Price, sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, @@ -214,7 +214,7 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - "Weather", + Weather, sensor.id, start, start + duration, diff --git a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index 79ac409c9..d826b3482 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -62,7 +62,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices + timedelta(minutes=15 * i), horizon=timedelta(hours=6), value=(300 + i) * -1, - asset_id=cs_1.id, + sensor_id=cs_1.id, data_source_id=data_source.id, ) p_2 = Power( @@ -70,7 +70,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices + timedelta(minutes=15 * i), horizon=timedelta(hours=6), value=(300 - i) * -1, - asset_id=cs_2.id, + sensor_id=cs_2.id, data_source_id=data_source.id, ) p_3 = Power( @@ -78,7 +78,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices + timedelta(minutes=15 * i), horizon=timedelta(hours=6), value=(0 + i) * -1, - asset_id=cs_3.id, + sensor_id=cs_3.id, data_source_id=data_source.id, ) power_forecasts.append(p_1) diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1.py b/flexmeasures/api/v1_1/tests/test_api_v1_1.py index a97131ba9..514058311 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1.py @@ -24,6 +24,7 @@ from flexmeasures.auth.error_handling import UNAUTH_ERROR_STATUS from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.user import User +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor @@ -154,7 +155,7 @@ def test_post_price_data(setup_api_test_data, db, app, 
clean_redis, post_message for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == "Price" + assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id diff --git a/flexmeasures/api/v1_1/tests/utils.py b/flexmeasures/api/v1_1/tests/utils.py index 8473dfece..6e0efe36f 100644 --- a/flexmeasures/api/v1_1/tests/utils.py +++ b/flexmeasures/api/v1_1/tests/utils.py @@ -9,7 +9,7 @@ from flask import current_app from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor @@ -160,10 +160,8 @@ def verify_prices_in_db(post_message, values, db, swapped_sign: bool = False): db.session.query(Price.value, Price.horizon) .filter((Price.datetime > start - resolution) & (Price.datetime < end)) .filter(Price.horizon == horizon - (end - (Price.datetime + resolution))) - .join( - Market, Sensor - ) # we still need to join Market, because Price.market_id is still coupled to Market rather than Sensor; see https://github.com/SeitaBV/flexmeasures/issues/252 - .filter(Price.market_id == Sensor.id) + .join(Sensor) + .filter(Price.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) df = pd.DataFrame( diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index cef4539df..2ecd638de 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -168,12 +168,12 @@ def get_device_message_response(generic_asset_name_groups, duration): Power.data_source_id, func.min(Power.horizon).label("most_recent_belief_horizon"), ) - .filter(Power.asset_id == sensor_id) + .filter(Power.sensor_id == sensor_id) .group_by(Power.datetime, Power.data_source_id) .subquery() ) power_values = ( - Power.query.filter(Power.asset_id == sensor_id) + Power.query.filter(Power.sensor_id == sensor_id) .filter(Power.data_source_id == scheduler_source.id) .filter(Power.datetime >= schedule_start) .filter(Power.datetime < schedule_start + planning_horizon) diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index b8c501f4c..bdf2c7abd 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -97,7 +97,7 @@ def test_post_udi_event_and_get_device_message( scheduler_source is not None ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.asset_id == sensor.id) + Power.query.filter(Power.sensor_id == sensor.id) .filter(Power.data_source_id == scheduler_source.id) .all() ) diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index d96948a66..03e3cb29d 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -34,7 +34,7 @@ from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.weather import Weather from flexmeasures.data.services.forecasting import create_forecasting_jobs from 
flexmeasures.data.services.resources import get_sensors @@ -81,18 +81,18 @@ def post_price_data_response( # noqa C901 ea = parse_entity_address(market, entity_type="market") except EntityAddressException as eae: return invalid_domain(str(eae)) - market_id = ea["sensor_id"] + sensor_id = ea["sensor_id"] - # Look for the Market object - market = Market.query.filter(Market.id == market_id).one_or_none() - if market is None: - return unrecognized_market(market_id) - elif unit != market.unit: - return invalid_unit("%s prices" % market.display_name, [market.unit]) + # Look for the Sensor object + sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() + if sensor is None: + return unrecognized_market(sensor_id) + elif unit != sensor.unit: + return invalid_unit("%s prices" % sensor.name, [sensor.unit]) # Convert to timely-beliefs terminology event_starts, belief_horizons = determine_belief_timing( - event_values, start, resolution, horizon, prior, market + event_values, start, resolution, horizon, prior, sensor ) # Create new Price objects @@ -102,7 +102,7 @@ def post_price_data_response( # noqa C901 datetime=event_start, value=event_value, horizon=belief_horizon, - market_id=market.id, + sensor_id=sensor.id, data_source_id=data_source.id, ) for event_start, event_value, belief_horizon in zip( @@ -116,8 +116,8 @@ def post_price_data_response( # noqa C901 if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( - "Price", - market.id, + Price, + sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, resolution=duration / len(event_values), @@ -206,7 +206,7 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset forecasting_jobs.extend( create_forecasting_jobs( - "Weather", + Weather, sensor.id, start, start + duration, @@ -358,7 +358,7 @@ def post_power_data( value=event_value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption horizon=belief_horizon, - asset_id=sensor_id, + sensor_id=sensor_id, data_source_id=data_source.id, ) for event_start, event_value, belief_horizon in zip( @@ -370,7 +370,7 @@ def post_power_data( if create_forecasting_jobs_too: forecasting_jobs.extend( create_forecasting_jobs( - "Power", + Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py index 4b7adcbf4..2b9a24e68 100644 --- a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py +++ b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py @@ -10,6 +10,7 @@ message_for_post_price_data, verify_sensor_data_in_db, ) +from flexmeasures.data.models.markets import Price @pytest.mark.parametrize( @@ -59,5 +60,5 @@ def test_post_price_data_2_0( for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == "Price" + assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id diff --git a/flexmeasures/api/v2_0/tests/utils.py b/flexmeasures/api/v2_0/tests/utils.py index 6bc8c1ab9..b0608d509 100644 --- a/flexmeasures/api/v2_0/tests/utils.py +++ b/flexmeasures/api/v2_0/tests/utils.py @@ -1,4 +1,4 @@ -from 
typing import Optional, Union +from typing import Optional from datetime import timedelta from isodate import duration_isoformat, parse_duration, parse_datetime @@ -6,10 +6,10 @@ import timely_beliefs as tb from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.models.weather import WeatherSensor, Weather +from flexmeasures.data.models.weather import Weather from flexmeasures.data.services.users import find_user_by_email from flexmeasures.api.v1_1.tests.utils import ( message_for_post_price_data as v1_1_message_for_post_price_data, @@ -26,7 +26,7 @@ def get_asset_post_data() -> dict: "longitude": 100.42, "asset_type_name": "battery", "owner_id": find_user_by_email("test_prosumer_user@seita.nl").id, - "market_id": Market.query.filter_by(name="epex_da").one_or_none().id, + "market_id": Sensor.query.filter_by(name="epex_da").one_or_none().id, } return post_data @@ -79,25 +79,21 @@ def verify_sensor_data_in_db( ): """util method to verify that sensor data ended up in the database""" if entity_type == "sensor": - sensor_type = Sensor data_type = TimedBelief elif entity_type == "connection": - sensor_type = Asset data_type = Power elif entity_type == "market": - sensor_type = Market data_type = Price elif entity_type == "weather_sensor": - sensor_type = WeatherSensor data_type = Weather else: raise ValueError("Unknown entity type") start = parse_datetime(post_message["start"]) end = start + parse_duration(post_message["duration"]) - sensor: Union[Sensor, Asset, Market, WeatherSensor] = SensorField( - entity_type, fm_scheme - ).deserialize(post_message[entity_type]) + sensor: Sensor = SensorField(entity_type, fm_scheme).deserialize( + post_message[entity_type] + ) resolution = sensor.event_resolution if "horizon" in post_message: horizon = parse_duration(post_message["horizon"]) @@ -107,8 +103,8 @@ def verify_sensor_data_in_db( (data_type.datetime > start - resolution) & (data_type.datetime < end) ) .filter(data_type.horizon == horizon) - .join(sensor_type) - .filter(sensor_type.name == sensor.name) + .join(Sensor) + .filter(Sensor.name == sensor.name) ) else: query = ( @@ -121,8 +117,8 @@ def verify_sensor_data_in_db( (data_type.datetime > start - resolution) & (data_type.datetime < end) ) # .filter(data_type.horizon == (data_type.datetime + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function - .join(sensor_type) - .filter(sensor_type.name == sensor.name) + .join(Sensor) + .filter(Sensor.name == sensor.name) ) # todo: after basing Price on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly df = pd.DataFrame( diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 649c2e9b5..7202abfa3 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -16,6 +16,9 @@ from flexmeasures.data import db from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.data.services.users import create_user +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.markets import Price +from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.user import Account, AccountRole, RolesAccounts from 
flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.schemas.sensors import SensorSchema @@ -607,11 +610,11 @@ def create_forecasts( if as_job: if asset_type == "Asset": - value_type = "Power" + value_type = Power elif asset_type == "Market": - value_type = "Price" + value_type = Price elif asset_type == "WeatherSensor": - value_type = "Weather" + value_type = Weather else: raise TypeError(f"Unknown asset_type {asset_type}") diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index f9ef8bdc5..00ba824ce 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -1,5 +1,5 @@ # flake8: noqa: E402 -from typing import Optional +from typing import List, Optional from datetime import datetime, timedelta import os @@ -12,11 +12,12 @@ else: from rq import Worker -from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Market -from flexmeasures.data.models.weather import WeatherSensor +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator -from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor +from flexmeasures.data.models.time_series import TimedBelief +from flexmeasures.data.queries.sensors import ( + query_sensor_by_name_and_generic_asset_type_name, +) from flexmeasures.utils.time_utils import as_server_time from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -38,9 +39,9 @@ def test_making_forecasts(): click.echo("Manual forecasting job queuing started ...") - asset_id = 1 + sensor_id = 1 forecast_filter = ( - Power.query.filter(Power.asset_id == asset_id) + Power.query.filter(Power.sensor_id == sensor_id) .filter(Power.horizon == timedelta(hours=6)) .filter( (Power.datetime >= as_server_time(datetime(2015, 4, 1, 6))) @@ -53,8 +54,8 @@ def test_making_forecasts(): click.echo("Forecasts found before : %d" % forecast_filter.count()) create_forecasting_jobs( - old_sensor_id=asset_id, - timed_value_type="Power", + old_sensor_id=sensor_id, + timed_value_type=Power, horizons=[timedelta(hours=6)], start_of_roll=as_server_time(datetime(2015, 4, 1)), end_of_roll=as_server_time(datetime(2015, 4, 3)), @@ -79,21 +80,36 @@ def test_making_forecasts(): # un-comment to use as CLI function # @app.cli.command() -@click.option("--asset-type", help="Asset type name.") -@click.option("--asset", "asset_name", help="Asset name.") +@click.option( + "--asset-type", + "generic_asset_type_names", + multiple=True, + required=True, + help="Name of generic asset type.", +) +@click.option( + "--timed-value-type", + "timed_value_type", + required=True, + help="Power, Price or Weather.", +) +@click.option("--sensor", "sensor_name", help="Name of sensor.") @click.option( "--from_date", default="2015-03-10", help="Forecast from date. Follow up with a date in the form yyyy-mm-dd.", ) @click.option("--period", default=3, help="Forecasting period in days.") -@click.option("--horizon", default=1, help="Forecasting horizon in hours.") +@click.option( + "--horizon", "horizon_hours", default=1, help="Forecasting horizon in hours." +) @click.option( "--training", default=30, help="Number of days in the training and testing period." 
) def test_generic_model( - asset_type: str, - asset_name: Optional[str] = None, + generic_asset_type_names: List[str], + timed_value_type: str, + sensor_name: Optional[str] = None, from_date: str = "2015-03-10", period: int = 3, horizon_hours: int = 1, @@ -101,39 +117,32 @@ ): """Manually test integration of timetomodel for our generic model.""" - asset_type_name = asset_type - if asset_name is None: - asset_name = Asset.query.filter_by(asset_type_name=asset_type_name).first().name start = as_server_time(datetime.strptime(from_date, "%Y-%m-%d")) end = start + timedelta(days=period) training_and_testing_period = timedelta(days=training) horizon = timedelta(hours=horizon_hours) with app.app_context(): - asset = ( - Asset.query.filter_by(asset_type_name=asset_type_name) - .filter_by(name=asset_name) - .first() - ) - market = ( - Market.query.filter_by(market_type_name=asset_type_name) - .filter_by(name=asset_name) - .first() - ) - sensor = ( - WeatherSensor.query.filter_by(weather_sensor_type_name=asset_type_name) - .filter_by(name=asset_name) - .first() - ) - if asset: - old_sensor = asset - elif market: - old_sensor = market - elif sensor: - old_sensor = sensor - else: - click.echo("No such assets in db, so I will not add any forecasts.") + sensors = query_sensor_by_name_and_generic_asset_type_name( + sensor_name=sensor_name, + generic_asset_type_names=generic_asset_type_names, + ).all() + if len(sensors) == 0: + click.echo("No such sensor in db, so I will not add any forecasts.") return + elif len(sensors) > 1: + click.echo("No unique sensor found in db, so I will not add any forecasts.") + return + + # todo: replacing this with timed_value_type = TimedBelief requires streamlining of the collect function on old sensor data classes with the search function on the TimedBelief class + if timed_value_type.lower() == "power": + from flexmeasures.data.models.assets import Power as TimedValueType + elif timed_value_type.lower() == "price": + from flexmeasures.data.models.markets import Price as TimedValueType + elif timed_value_type.lower() == "weather": + from flexmeasures.data.models.weather import Weather as TimedValueType + else: + raise ValueError(f"Unknown timed value type {timed_value_type}") linear_model_configurator = lookup_model_specs_configurator("linear") ( @@ -141,8 +150,8 @@ model_identifier, fallback_model_identifier, ) = linear_model_configurator( - sensor=old_sensor.corresponding_sensor, - time_series_class=determine_old_time_series_class_by_old_sensor(old_sensor), + sensor=sensors[0], + time_series_class=TimedValueType, forecast_start=start, forecast_end=end, forecast_horizon=horizon, diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 67e3c0ee1..240fe4073 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -336,8 +336,8 @@ def setup_assets( horizon=parse_duration("PT0M"), value=val, data_source_id=setup_sources["Seita"].id, + sensor_id=asset.id, ) - p.asset = asset db.session.add(p) return {asset.name: asset for asset in assets} @@ -399,8 +399,8 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources horizon=timedelta(hours=0), value=val, data_source_id=setup_sources["Seita"].id, + sensor_id=setup_markets["epex_da"].id, ) - p.market = setup_markets["epex_da"] db.session.add(p) # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours) @@ -414,8 +414,8 @@ def add_market_prices(db: SQLAlchemy, setup_assets, 
setup_markets, setup_sources horizon=timedelta(hours=0), value=val, data_source_id=setup_sources["Seita"].id, + sensor_id=setup_markets["epex_da"].id, ) - p.market = setup_markets["epex_da"] db.session.add(p) diff --git a/flexmeasures/data/migrations/versions/830e72a8b218_migrate_sensor_relationships_for_power_price_weather.py b/flexmeasures/data/migrations/versions/830e72a8b218_migrate_sensor_relationships_for_power_price_weather.py new file mode 100644 index 000000000..c39390fd8 --- /dev/null +++ b/flexmeasures/data/migrations/versions/830e72a8b218_migrate_sensor_relationships_for_power_price_weather.py @@ -0,0 +1,90 @@ +"""Migrate sensor relationships for Power/Price/Weather + +Revision ID: 830e72a8b218 +Revises: 6cf5b241b85f +Create Date: 2021-12-02 14:58:06.581092 + +""" +from alembic import op + + +# revision identifiers, used by Alembic. +revision = "830e72a8b218" +down_revision = "6cf5b241b85f" +branch_labels = None +depends_on = None + + +def upgrade(): + + # Migrate Power/Asset relationship to Power/Sensor relationship + op.drop_constraint("power_asset_id_asset_fkey", "power", type_="foreignkey") + op.drop_index("power_asset_id_idx", table_name="power") + op.alter_column("power", "asset_id", new_column_name="sensor_id") + op.create_index(op.f("power_sensor_id_idx"), "power", ["sensor_id"], unique=False) + op.create_foreign_key( + op.f("power_sensor_id_sensor_fkey"), + "power", + "sensor", + ["sensor_id"], + ["id"], + ondelete="CASCADE", + ) + + # Migrate Price/Market relationship to Price/Sensor relationship + op.drop_constraint("price_market_id_market_fkey", "price", type_="foreignkey") + op.drop_index("price_market_id_idx", table_name="price") + op.alter_column("price", "market_id", new_column_name="sensor_id") + op.create_index(op.f("price_sensor_id_idx"), "price", ["sensor_id"], unique=False) + op.create_foreign_key( + op.f("price_sensor_id_sensor_fkey"), "price", "sensor", ["sensor_id"], ["id"] + ) + + # Migrate Weather/WeatherSensor relationship to Weather/Sensor relationship + op.drop_constraint( + "weather_sensor_id_weather_sensor_fkey", "weather", type_="foreignkey" + ) + op.create_foreign_key( + op.f("weather_sensor_id_sensor_fkey"), + "weather", + "sensor", + ["sensor_id"], + ["id"], + ) + + +def downgrade(): + # Migrate Weather/Sensor relationship to Weather/WeatherSensor relationship + op.drop_constraint( + op.f("weather_sensor_id_sensor_fkey"), "weather", type_="foreignkey" + ) + op.create_foreign_key( + "weather_sensor_id_weather_sensor_fkey", + "weather", + "weather_sensor", + ["sensor_id"], + ["id"], + ) + + # Migrate Price/Sensor relationship to Price/Market relationship + op.drop_constraint(op.f("price_sensor_id_sensor_fkey"), "price", type_="foreignkey") + op.drop_index(op.f("price_sensor_id_idx"), table_name="price") + op.alter_column("price", "sensor_id", new_column_name="market_id") + op.create_index("price_market_id_idx", "price", ["market_id"], unique=False) + op.create_foreign_key( + "price_market_id_market_fkey", "price", "market", ["market_id"], ["id"] + ) + + # Migrate Power/Sensor relationship to Power/Asset relationship + op.drop_constraint(op.f("power_sensor_id_sensor_fkey"), "power", type_="foreignkey") + op.drop_index(op.f("power_sensor_id_idx"), table_name="power") + op.alter_column("power", "sensor_id", new_column_name="asset_id") + op.create_index("power_asset_id_idx", "power", ["asset_id"], unique=False) + op.create_foreign_key( + "power_asset_id_asset_fkey", + "power", + "asset", + ["asset_id"], + ["id"], + ondelete="CASCADE", + ) diff --git 
a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 0815b2039..4584a93c3 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -211,7 +211,7 @@ def latest_state(self, event_ends_before: Optional[datetime] = None) -> "Power": """Search the most recent event for this sensor, optionally before some datetime.""" # todo: replace with Sensor.latest_state power_query = ( - Power.query.filter(Power.asset == self) + Power.query.filter(Power.sensor_id == self.id) .filter(Power.horizon <= timedelta(hours=0)) .order_by(Power.datetime.desc()) ) @@ -314,14 +314,14 @@ class Power(TimedValue, db.Model): TODO: If there are more than one measurement per asset per time step possible, we can expand rather easily. """ - asset_id = db.Column( + sensor_id = db.Column( db.Integer(), - db.ForeignKey("asset.id", ondelete="CASCADE"), + db.ForeignKey("sensor.id", ondelete="CASCADE"), primary_key=True, index=True, ) - asset = db.relationship( - "Asset", + sensor = db.relationship( + "Sensor", backref=db.backref( "measurements", lazy=True, @@ -336,12 +336,12 @@ def make_query( **kwargs, ) -> Query: """Construct the database query.""" - return super().make_query(old_sensor_class=Asset, **kwargs) + return super().make_query(**kwargs) def to_dict(self): return { "datetime": isodate.datetime_isoformat(self.datetime), - "asset_id": self.asset_id, + "sensor_id": self.sensor_id, "value": self.value, "horizon": self.horizon, } @@ -350,9 +350,9 @@ def __init__(self, **kwargs): super(Power, self).__init__(**kwargs) def __repr__(self): - return "" % ( + return "" % ( self.value, - self.asset_id, + self.sensor_id, self.datetime, self.data_source_id, self.horizon, diff --git a/flexmeasures/data/models/forecasting/__init__.py b/flexmeasures/data/models/forecasting/__init__.py index faba9c160..d22f5cc3f 100644 --- a/flexmeasures/data/models/forecasting/__init__.py +++ b/flexmeasures/data/models/forecasting/__init__.py @@ -1,5 +1,4 @@ -from typing import Tuple, Callable, Union, Optional -from datetime import datetime, timedelta +from typing import Tuple, Callable from timetomodel import ModelSpecs @@ -10,10 +9,6 @@ ols_specs_configurator as linear_ols_specs, ) -from flexmeasures.data.models.assets import Asset -from flexmeasures.data.models.markets import Market -from flexmeasures.data.models.weather import WeatherSensor - model_map = { "naive": naive_specs, @@ -25,14 +20,8 @@ def lookup_model_specs_configurator( model_search_term: str = "linear-OLS", ) -> Callable[ - [ - Union[Asset, Market, WeatherSensor], - datetime, - datetime, - timedelta, - Optional[timedelta], - Optional[dict], - ], + ..., # See model_spec_factory.create_initial_model_specs for an up-to-date type annotation + # Annotating here would require Python>=3.10 (specifically, ParamSpec from PEP 612) Tuple[ModelSpecs, str, str], ]: """ diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 13f0ac3bb..5f751b446 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -119,11 +119,13 @@ def create_initial_model_specs( # noqa: C901 forecast_start: datetime, # Start of forecast period forecast_end: datetime, # End of forecast period forecast_horizon: timedelta, # Duration between time of forecasting and end time of the event that is forecast - ex_post_horizon: timedelta = None, + ex_post_horizon: Optional[timedelta] = None, transform_to_normal: 
bool = True, use_regressors: bool = True, # If false, do not create regressor specs use_periodicity: bool = True, # If false, do not create lags given the asset's periodicity - custom_model_params: dict = None, # overwrite forecasting params, most useful for testing or experimentation. + custom_model_params: Optional[ + dict + ] = None, # overwrite model params, most useful for tests or experiments ) -> ModelSpecs: """ Generic model specs for all asset types (also for markets and weather sensors) and horizons. diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index c89c26be7..b2127eb5b 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -192,15 +192,15 @@ class Price(TimedValue, db.Model): TODO: datetime objects take up most of the space (12 bytes each)). One way out is to normalise them out to a table. """ - market_id = db.Column( - db.Integer(), db.ForeignKey("market.id"), primary_key=True, index=True + sensor_id = db.Column( + db.Integer(), db.ForeignKey("sensor.id"), primary_key=True, index=True ) - market = db.relationship("Market", backref=db.backref("prices", lazy=True)) + sensor = db.relationship("Sensor", backref=db.backref("prices", lazy=True)) @classmethod def make_query(cls, **kwargs) -> Query: """Construct the database query.""" - return super().make_query(old_sensor_class=Market, **kwargs) + return super().make_query(**kwargs) def __init__(self, **kwargs): super(Price, self).__init__(**kwargs) diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index cdbc1191a..9daef80db 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -4,7 +4,6 @@ import numpy as np import pandas as pd -from flexmeasures.data.models.markets import Market from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station @@ -13,9 +12,9 @@ def test_battery_solver_day_1(add_battery_assets): - epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() + epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() - assert Market.query.get(battery.get_attribute("market_id")) == epex_da + assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 1)) end = as_server_time(datetime(2015, 1, 2)) resolution = timedelta(minutes=15) @@ -35,9 +34,9 @@ def test_battery_solver_day_1(add_battery_assets): def test_battery_solver_day_2(add_battery_assets): - epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() + epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() - assert Market.query.get(battery.get_attribute("market_id")) == epex_da + assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) @@ -83,11 +82,11 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): soc_at_start = 1 duration_until_target = timedelta(hours=2) - epex_da = Market.query.filter(Market.name == "epex_da").one_or_none() + epex_da = Sensor.query.filter(Sensor.name == 
"epex_da").one_or_none() charging_station = Sensor.query.filter( Sensor.name == charging_station_name ).one_or_none() - assert Market.query.get(charging_station.get_attribute("market_id")) == epex_da + assert Sensor.query.get(charging_station.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index 97b2968ce..b44616160 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -7,7 +7,7 @@ import numpy as np import timely_beliefs as tb -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.planning.exceptions import ( UnknownMarketException, @@ -60,11 +60,12 @@ def add_tiny_price_slope( return prices -def get_market(sensor: Sensor) -> Market: - market = Market.query.get(sensor.get_attribute("market_id")) - if market is None: +def get_market(sensor: Sensor) -> Sensor: + """Get market sensor from the sensor's attributes.""" + sensor = Sensor.query.get(sensor.get_attribute("market_id")) + if sensor is None: raise UnknownMarketException - return market + return sensor def get_prices( @@ -78,11 +79,11 @@ def get_prices( (this may require implementing a belief time for scheduling jobs). """ - # Look for the applicable market - market = get_market(sensor) + # Look for the applicable market sensor + sensor = get_market(sensor) price_bdf: tb.BeliefsDataFrame = Price.collect( - market.name, + sensor.name, query_window=query_window, resolution=to_offset(resolution).freqstr, ) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index c32ce0ec7..9d87f1e0d 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -435,7 +435,6 @@ def data_source_id(cls): # noqa: B902 @classmethod def make_query( cls, - old_sensor_class: db.Model, old_sensor_names: Tuple[str], query_window: Tuple[Optional[datetime_type], Optional[datetime_type]], belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]] = ( @@ -473,11 +472,9 @@ def make_query( if session is None: session = db.session start, end = query_window - query = create_beliefs_query( - cls, session, old_sensor_class, old_sensor_names, start, end - ) + query = create_beliefs_query(cls, session, Sensor, old_sensor_names, start, end) query = add_belief_timing_filter( - cls, query, old_sensor_class, belief_horizon_window, belief_time_window + cls, query, Sensor, belief_horizon_window, belief_time_window ) if user_source_ids: query = add_user_source_filter(cls, query, user_source_ids) diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index 9354b6517..5a5f4771a 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -274,20 +274,20 @@ class Weather(TimedValue, db.Model): """ sensor_id = db.Column( - db.Integer(), db.ForeignKey("weather_sensor.id"), primary_key=True, index=True + db.Integer(), db.ForeignKey("sensor.id"), primary_key=True, index=True ) - sensor = db.relationship("WeatherSensor", backref=db.backref("weather", lazy=True)) + sensor = db.relationship("Sensor", backref=db.backref("weather", lazy=True)) @classmethod def make_query(cls, **kwargs) -> Query: """Construct the database query.""" - 
return super().make_query(old_sensor_class=WeatherSensor, **kwargs) + return super().make_query(**kwargs) def __init__(self, **kwargs): super(Weather, self).__init__(**kwargs) def __repr__(self): - return "" % ( + return "" % ( self.value, self.sensor_id, self.datetime, diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index 3b8f79d8c..3bcc6f9da 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -13,7 +13,8 @@ from flexmeasures.utils import calculations, time_utils from flexmeasures.data.services.resources import Resource, find_closest_weather_sensor from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Market, Price +from flexmeasures.data.models.markets import Price +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.weather import Weather, WeatherSensor, WeatherSensorType @@ -157,7 +158,7 @@ def get_power_data( def get_prices_data( metrics: dict, - market: Market, + market_sensor: Sensor, query_window: Tuple[datetime, datetime], resolution: str, forecast_horizon: timedelta, @@ -172,7 +173,7 @@ def get_prices_data( - weighted absolute percentage error """ - market_name = "" if market is None else market.name + market_name = "" if market_sensor is None else market_sensor.name # Get price data price_bdf: tb.BeliefsDataFrame = Price.collect( diff --git a/flexmeasures/data/queries/sensors.py b/flexmeasures/data/queries/sensors.py new file mode 100644 index 000000000..8fec46598 --- /dev/null +++ b/flexmeasures/data/queries/sensors.py @@ -0,0 +1,29 @@ +from typing import List, Optional + +from sqlalchemy.orm import Query + +from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType +from flexmeasures.data.models.time_series import Sensor + + +def query_sensor_by_name_and_generic_asset_type_name( + sensor_name: Optional[str] = None, + generic_asset_type_names: Optional[List[str]] = None, +) -> Query: + """Match a sensor by its own name and that of its generic asset type. 
+ + :param sensor_name: should match (if None, no match is needed) + :param generic_asset_type_names: should match at least one of these (if None, no match is needed) + """ + query = Sensor.query + if sensor_name is not None: + query = query.filter(Sensor.name == sensor_name) + if generic_asset_type_names is not None: + query = ( + query.join(GenericAsset) + .join(GenericAssetType) + .filter(GenericAssetType.name.in_(generic_asset_type_names)) + .filter(GenericAsset.generic_asset_type_id == GenericAssetType.id) + .filter(Sensor.generic_asset_id == GenericAsset.id) + ) + return query diff --git a/flexmeasures/data/schemas/assets.py b/flexmeasures/data/schemas/assets.py index 0dfc0ffb6..aed32ade6 100644 --- a/flexmeasures/data/schemas/assets.py +++ b/flexmeasures/data/schemas/assets.py @@ -2,7 +2,7 @@ from flexmeasures.data import ma from flexmeasures.data.models.assets import Asset, AssetType -from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.user import User from flexmeasures.data.schemas.sensors import SensorSchemaMixin @@ -34,8 +34,8 @@ def validate_owner(self, owner_id: int): @validates("market_id") def validate_market(self, market_id: int): - market = Market.query.get(market_id) - if not market: + sensor = Sensor.query.get(market_id) + if not sensor: raise ValidationError(f"Market with id {market_id} doesn't exist.") @validates("asset_type_name") diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index bdabb0f08..2b1ecbc5b 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -166,7 +166,7 @@ def add_dummy_tou_market(db: SQLAlchemy): datetime=datetime(year, 1, 1, tzinfo=pytz.utc), horizon=timedelta(0), data_source_id=source.id, - market=market, + sensor_id=market.id, ) ) @@ -335,7 +335,7 @@ def populate_time_series_forecasts( # noqa: C901 datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), horizon=horizon, value=value, - market_id=old_sensor.id, + sensor_id=old_sensor.id, data_source_id=data_source.id, ) for dt, value in forecasts.items() @@ -452,7 +452,7 @@ def depopulate_measurements( num_prices_deleted = ( db.session.query(Price) .filter(Price.horizon <= timedelta(hours=0)) - .filter(Price.market == market) + .filter(Price.sensor_id == market.id) .delete() ) else: @@ -466,7 +466,7 @@ def depopulate_measurements( num_power_measurements_deleted = ( db.session.query(Power) .filter(Power.horizon <= timedelta(hours=0)) - .filter(Power.asset == asset) + .filter(Power.sensor_id == asset.id) .delete() ) else: @@ -482,7 +482,7 @@ def depopulate_measurements( num_weather_measurements_deleted = ( db.session.query(Weather) .filter(Weather.horizon <= timedelta(hours=0)) - .filter(Weather.sensor == sensor) + .filter(Weather.sensor_id == sensor.id) .delete() ) else: @@ -547,7 +547,7 @@ def depopulate_prognoses( num_prices_deleted = ( db.session.query(Price) .filter(Price.horizon > timedelta(hours=0)) - .filter(Price.market == market) + .filter(Price.sensor_id == market.id) .delete() ) else: @@ -561,7 +561,7 @@ def depopulate_prognoses( num_power_measurements_deleted = ( db.session.query(Power) .filter(Power.horizon > timedelta(hours=0)) - .filter(Power.asset == asset) + .filter(Power.sensor_id == asset.id) .delete() ) else: @@ -577,7 +577,7 @@ def depopulate_prognoses( num_weather_measurements_deleted = ( db.session.query(Weather) .filter(Weather.horizon > timedelta(hours=0)) - .filter(Weather.sensor == sensor) + 
.filter(Weather.sensor_id == sensor.id) .delete() ) else: diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index a0d98c303..b73fbc1e8 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -1,5 +1,5 @@ from datetime import datetime, timedelta -from typing import List, Union +from typing import List, Type, Union from flask import current_app import click @@ -9,16 +9,16 @@ from timetomodel.forecasting import make_rolling_forecasts from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException -from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor +from flexmeasures.data.models.markets import Price +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.forecasting.utils import ( get_query_window, check_data_availability, ) -from flexmeasures.data.models.weather import Weather, WeatherSensor +from flexmeasures.data.models.weather import Weather from flexmeasures.data.utils import save_to_session, get_data_source from flexmeasures.utils.time_utils import ( as_server_time, @@ -46,7 +46,7 @@ class MisconfiguredForecastingJobException(Exception): def create_forecasting_jobs( - timed_value_type: str, + timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], old_sensor_id: int, start_of_roll: datetime, end_of_roll: datetime, @@ -124,7 +124,7 @@ def create_forecasting_jobs( def make_fixed_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: str, + timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -142,7 +142,7 @@ def make_fixed_viewpoint_forecasts( def make_rolling_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: str, + timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -159,7 +159,7 @@ def make_rolling_viewpoint_forecasts( ---------- :param old_sensor_id: int To identify which old sensor to forecast (note: old_sensor_id == sensor_id) - :param timed_value_type: str + :param timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]] This should go away after a refactoring - we now use it to create the DB entry for the forecasts :param horizon: timedelta duration between the end of each interval and the time at which the belief about that interval is formed @@ -181,15 +181,15 @@ def make_rolling_viewpoint_forecasts( # find out which model to run, fall back to latest recommended model_search_term = rq_job.meta.get("model_search_term", "linear-OLS") - # find old sensor - old_sensor = get_old_sensor(old_sensor_id, timed_value_type) + # find sensor + sensor = Sensor.query.filter_by(id=old_sensor_id).one_or_none() click.echo( "Running Forecasting Job %s: %s for %s on model '%s', from %s to %s" - % (rq_job.id, old_sensor, horizon, model_search_term, start, end) + % (rq_job.id, sensor, horizon, model_search_term, start, end) ) - if hasattr(old_sensor, "market_type"): + if hasattr(sensor, "market_type"): ex_post_horizon = None # Todo: until we sorted out the ex_post_horizon, use all available price data else: ex_post_horizon = timedelta(hours=0) @@ 
-197,8 +197,8 @@ def make_rolling_viewpoint_forecasts( # Make model specs model_configurator = lookup_model_specs_configurator(model_search_term) model_specs, model_identifier, fallback_model_search_term = model_configurator( - sensor=old_sensor.corresponding_sensor, - time_series_class=determine_old_time_series_class_by_old_sensor(old_sensor), + sensor=sensor, + time_series_class=timed_value_type, forecast_start=as_server_time(start), forecast_end=as_server_time(end), forecast_horizon=horizon, @@ -223,8 +223,8 @@ def make_rolling_viewpoint_forecasts( [lag * model_specs.frequency for lag in model_specs.lags], ) check_data_availability( - old_sensor, - determine_old_time_series_class_by_old_sensor(old_sensor), + sensor, + timed_value_type, start, end, query_window, @@ -245,8 +245,12 @@ def make_rolling_viewpoint_forecasts( click.echo("Job %s made %d forecasts." % (rq_job.id, len(forecasts))) ts_value_forecasts = [ - make_timed_value( - timed_value_type, old_sensor_id, dt, value, horizon, data_source.id + timed_value_type( + datetime=dt, + horizon=horizon, + value=value, + sensor_id=old_sensor_id, + data_source_id=data_source.id, ) for dt, value in forecasts.items() ] @@ -305,75 +309,3 @@ def handle_forecasting_exception(job, exc_type, exc_value, traceback): def num_forecasts(start: datetime, end: datetime, resolution: timedelta) -> int: """Compute how many forecasts a job needs to make, given a resolution""" return (end - start) // resolution - - -# TODO: the functions below can hopefully go away if we refactor a real generic asset class -# and store everything in one time series database. - - -def get_old_sensor( - old_sensor_id: int, timed_value_type: str -) -> Union[Asset, Market, WeatherSensor]: - """Get old sensor for this job. Maybe simpler once we redesign timed value classes (make a generic one)""" - if timed_value_type not in ("Power", "Price", "Weather"): - raise Exception( - "Cannot get old sensor for timed_value_type '%s'" % timed_value_type - ) - old_sensor = None - if timed_value_type == "Power": - old_sensor = Asset.query.filter_by(id=old_sensor_id).one_or_none() - elif timed_value_type == "Price": - old_sensor = Market.query.filter_by(id=old_sensor_id).one_or_none() - elif timed_value_type == "Weather": - old_sensor = WeatherSensor.query.filter_by(id=old_sensor_id).one_or_none() - if old_sensor is None: - raise Exception( - "Cannot find old sensor for value type %s with id %d" - % (timed_value_type, old_sensor_id) - ) - return old_sensor - - -def make_timed_value( - timed_value_type: str, - old_sensor_id: int, - dt: datetime, - value: float, - horizon: timedelta, - data_source_id: int, -) -> Union[Power, Price, Weather]: - if timed_value_type not in ("Power", "Price", "Weather"): - raise Exception( - "Cannot get old sensor for timed_value_type '%s'" % timed_value_type - ) - ts_value = None - if timed_value_type == "Power": - ts_value = Power( - datetime=dt, - horizon=horizon, - value=value, - asset_id=old_sensor_id, - data_source_id=data_source_id, - ) - elif timed_value_type == "Price": - ts_value = Price( - datetime=dt, - horizon=horizon, - value=value, - market_id=old_sensor_id, - data_source_id=data_source_id, - ) - elif timed_value_type == "Weather": - ts_value = Weather( - datetime=dt, - horizon=horizon, - value=value, - sensor_id=old_sensor_id, - data_source_id=data_source_id, - ) - if ts_value is None: - raise Exception( - "Cannot create timed value of type %s with id %d" - % (timed_value_type, old_sensor_id) - ) - return ts_value diff --git 
a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index d94f88baa..c07bcde7b 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -149,7 +149,7 @@ def make_schedule( datetime=dt, horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), value=-value, - asset_id=asset_id, + sensor_id=asset_id, data_source_id=data_source.id, ) for dt, value in consumption_schedule.items() diff --git a/flexmeasures/data/services/time_series.py b/flexmeasures/data/services/time_series.py index e889f61c7..bfab31c70 100644 --- a/flexmeasures/data/services/time_series.py +++ b/flexmeasures/data/services/time_series.py @@ -213,25 +213,15 @@ def query_time_series_data( def find_sensor_by_name(name: str): """ Helper function: Find a sensor by name. - TODO: make obsolete when we switched to one sensor class (and timely-beliefs) + TODO: make obsolete when we switched to collecting sensor data by sensor id rather than name """ # importing here to avoid circular imports, deemed okay for temp. solution - from flexmeasures.data.models.assets import Asset - from flexmeasures.data.models.weather import WeatherSensor - from flexmeasures.data.models.markets import Market - - asset = Asset.query.filter(Asset.name == name).one_or_none() - if asset: - return asset - weather_sensor = WeatherSensor.query.filter( - WeatherSensor.name == name - ).one_or_none() - if weather_sensor: - return weather_sensor - market = Market.query.filter(Market.name == name).one_or_none() - if market: - return market - raise Exception("Unknown sensor: %s" % name) + from flexmeasures.data.models.time_series import Sensor + + sensor = Sensor.query.filter(Sensor.name == name).one_or_none() + if sensor is None: + raise Exception("Unknown sensor: %s" % name) + return sensor def drop_non_unique_ids( diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index ab9574937..1c2e882b4 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -79,8 +79,8 @@ def setup_fresh_test_data( horizon=parse_duration("PT0M"), value=val, data_source_id=data_source.id, + sensor_id=asset.id, ) - p.asset = asset db.session.add(p) add_test_weather_sensor_and_forecasts(fresh_db) @@ -130,7 +130,7 @@ def add_test_weather_sensor_and_forecasts(db: SQLAlchemy): for dt, val in zip(time_slots, values): db.session.add( Weather( - sensor=sensor, + sensor_id=sensor.id, datetime=as_server_time(dt), value=val, horizon=timedelta(hours=6), diff --git a/flexmeasures/data/tests/test_forecasting_jobs.py b/flexmeasures/data/tests/test_forecasting_jobs.py index f9be87b72..bf007a459 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs.py +++ b/flexmeasures/data/tests/test_forecasting_jobs.py @@ -7,7 +7,8 @@ from rq.job import Job from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -34,11 +35,11 @@ def get_data_source(model_identifier: str = "linear-OLS model v2"): ).one_or_none() -def check_aggregate(overall_expected: int, horizon: timedelta, asset_id: int): +def check_aggregate(overall_expected: int, horizon: timedelta, sensor_id: int): """Check that the expected number of forecasts were made for the given horizon, and check 
that each forecast is a number.""" all_forecasts = ( - Power.query.filter(Power.asset_id == asset_id) + Power.query.filter(Power.sensor_id == sensor_id) .filter(Power.horizon == horizon) .all() ) @@ -51,14 +52,14 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): - data source was made, - forecasts have been made """ - wind_device_1 = Asset.query.filter_by(name="wind-asset-1").one_or_none() + wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() assert get_data_source() is None # makes 4 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 6)), end_of_roll=as_server_time(datetime(2015, 1, 1, 7)), horizons=[horizon], @@ -73,7 +74,7 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): assert get_data_source() is not None forecasts = ( - Power.query.filter(Power.asset_id == wind_device_1.id) + Power.query.filter(Power.sensor_id == wind_device_1.id) .filter(Power.horizon == horizon) .filter( (Power.datetime >= as_server_time(datetime(2015, 1, 1, 7))) @@ -86,11 +87,11 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_data): - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() last_power_datetime = ( ( - Power.query.filter(Power.asset_id == solar_device1.id) + Power.query.filter(Power.sensor_id == solar_device1.id) .filter(Power.horizon == timedelta(hours=0)) .order_by(Power.datetime.desc()) ) @@ -101,7 +102,7 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ # makes 4 forecasts, 1 of which is for a new datetime index horizon = timedelta(hours=6) job = create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=last_power_datetime - horizon - timedelta(minutes=30), # start of data on which forecast is based (5.15pm) @@ -119,7 +120,7 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.asset_id == solar_device1.id) + Power.query.filter(Power.sensor_id == solar_device1.id) .filter(Power.horizon == horizon) .filter(Power.datetime > last_power_datetime) .all() @@ -173,9 +174,9 @@ def check_failures( def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data): """This one (as well as the fallback) should fail as there is no underlying data. 
(Power data is in 2015)""" - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2016, 1, 1, 20)), end_of_roll=as_server_time(datetime(2016, 1, 1, 22)), horizons=[timedelta(hours=1)], @@ -188,9 +189,9 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): """ This one (as well as the fallback) should fail as the horizon is invalid.""" - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 21)), end_of_roll=as_server_time(datetime(2015, 1, 1, 23)), horizons=[timedelta(hours=18)], @@ -203,14 +204,14 @@ def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): def test_failed_unknown_model(app, clean_redis, setup_test_data): """ This one should fail because we use a model search term which yields no model configurator.""" - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() horizon = timedelta(hours=1) cmp = custom_model_params() cmp["training_and_testing_period"] = timedelta(days=365) create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], diff --git a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py index efdab97fa..27ff1df45 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py @@ -3,7 +3,8 @@ import pytest from sqlalchemy.orm import Query -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, handle_forecasting_exception, @@ -19,12 +20,12 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis): - wind_device2: Asset = Asset.query.filter_by(name="wind-asset-2").one_or_none() + wind_device2: Sensor = Sensor.query.filter_by(name="wind-asset-2").one_or_none() # makes 12 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 10)), end_of_roll=as_server_time(datetime(2015, 1, 1, 13)), horizons=[horizon], @@ -36,7 +37,7 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.asset_id == wind_device2.id) + Power.query.filter(Power.sensor_id == wind_device2.id) .filter(Power.horizon == horizon) .filter( (Power.datetime >= as_server_time(datetime(2015, 1, 1, 11))) @@ -49,15 +50,15 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis def test_forecasting_two_hours_of_solar(app, 
setup_fresh_test_data, clean_redis): - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() - wind_device2: Asset = Asset.query.filter_by(name="wind-asset-2").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() + wind_device2: Sensor = Sensor.query.filter_by(name="wind-asset-2").one_or_none() print(solar_device1) print(wind_device2) # makes 8 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], @@ -68,7 +69,7 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.asset_id == solar_device1.id) + Power.query.filter(Power.sensor_id == solar_device1.id) .filter(Power.horizon == horizon) .filter( (Power.datetime >= as_server_time(datetime(2015, 1, 1, 13))) @@ -93,7 +94,7 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( (fail-test falls back to linear & linear falls back to naive). As a result, there should be forecasts in the DB. """ - solar_device1: Asset = Asset.query.filter_by(name="solar-asset-1").one_or_none() + solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() horizon_hours = 1 horizon = timedelta(hours=horizon_hours) @@ -105,7 +106,7 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( # The failed test model (this failure enqueues a new job) create_forecasting_jobs( - timed_value_type="Power", + timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)), end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)), horizons=[horizon], @@ -126,7 +127,7 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( def make_query(the_horizon_hours: int) -> Query: the_horizon = timedelta(hours=the_horizon_hours) return ( - Power.query.filter(Power.asset_id == solar_device1.id) + Power.query.filter(Power.sensor_id == solar_device1.id) .filter(Power.horizon == the_horizon) .filter( ( diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index bd47ab748..687e1cd43 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -6,7 +6,7 @@ import pytz import timely_beliefs as tb -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.queries.utils import ( @@ -40,8 +40,8 @@ ], ) def test_collect_power(db, app, query_start, query_end, num_values, setup_test_data): - wind_device_1 = Asset.query.filter_by(name="wind-asset-1").one_or_none() - data = Power.query.filter(Power.asset_id == wind_device_1.id).all() + wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() + data = Power.query.filter(Power.sensor_id == wind_device_1.id).all() print(data) bdf: tb.BeliefsDataFrame = Power.collect( wind_device_1.name, (query_start, query_end) @@ -90,7 +90,7 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d def test_collect_power_resampled( db, app, query_start, query_end, resolution, num_values, setup_test_data ): - wind_device_1 = 
Asset.query.filter_by(name="wind-asset-1").one_or_none() + wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() bdf: tb.BeliefsDataFrame = Power.collect( wind_device_1.name, (query_start, query_end), resolution=resolution ) @@ -207,7 +207,7 @@ def test_multiplication_with_both_empty_dataframe(): @pytest.mark.parametrize("check_empty_frame", [True, False]) def test_simplify_index(setup_test_data, check_empty_frame): """Check whether simplify_index retains the event resolution.""" - wind_device_1 = Asset.query.filter_by(name="wind-asset-1").one_or_none() + wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() bdf: tb.BeliefsDataFrame = Power.collect( wind_device_1.name, ( diff --git a/flexmeasures/data/tests/test_scheduling_jobs.py b/flexmeasures/data/tests/test_scheduling_jobs.py index 42a317bdb..6ca2f1c02 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs.py +++ b/flexmeasures/data/tests/test_scheduling_jobs.py @@ -41,7 +41,7 @@ def test_scheduling_a_battery(db, app, add_battery_assets, setup_test_data): ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.asset_id == battery.id) + Power.query.filter(Power.sensor_id == battery.id) .filter(Power.data_source_id == scheduler_source.id) .all() ) diff --git a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py index 18fce924b..499760ed3 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py @@ -63,7 +63,7 @@ def test_scheduling_a_charging_station( ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.asset_id == charging_station.id) + Power.query.filter(Power.sensor_id == charging_station.id) .filter(Power.data_source_id == scheduler_source.id) .all() ) diff --git a/flexmeasures/data/tests/test_user_services.py b/flexmeasures/data/tests/test_user_services.py index 01042e0d8..37e4a0bd1 100644 --- a/flexmeasures/data/tests/test_user_services.py +++ b/flexmeasures/data/tests/test_user_services.py @@ -89,7 +89,7 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): ).all() asset_ids = [asset.id for asset in user_assets_with_measurements_before] for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.asset_id == asset_id).count() + num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() assert num_power_measurements == 96 delete_user(prosumer) assert find_user_by_email("test_prosumer_user@seita.nl") is None @@ -97,5 +97,5 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): assert len(user_assets_after) == 0 assert User.query.count() == num_users_before - 1 for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.asset_id == asset_id).count() + num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() assert num_power_measurements == 0 diff --git a/flexmeasures/ui/views/analytics.py b/flexmeasures/ui/views/analytics.py index 264cd32b7..73e908c74 100644 --- a/flexmeasures/ui/views/analytics.py +++ b/flexmeasures/ui/views/analytics.py @@ -15,6 +15,7 @@ from flexmeasures.auth.decorators import account_roles_accepted from flexmeasures.data.models.markets import Market +from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.services.resources import ( get_assets, @@ -99,7 +100,7 @@ 
def analytics_view(): if view_shows_individual_traces else "none", selected_resource, - selected_market, + selected_market.corresponding_sensor, selected_sensor_type, selected_resource.assets, ) @@ -258,7 +259,7 @@ def analytics_data_view(content, content_type): show_consumption_as_positive, "none", selected_resource, - selected_market, + selected_market.corresponding_sensor, selected_sensor_type, selected_resource.assets, ) @@ -386,7 +387,7 @@ def get_data_and_metrics( show_consumption_as_positive: bool, showing_individual_traces_for: str, selected_resource: Resource, - selected_market, + selected_market_sensor: Sensor, selected_sensor_type, assets, ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float], str, WeatherSensor]: @@ -410,7 +411,7 @@ def get_data_and_metrics( ) data["prices"], data["prices_forecast"], metrics = get_prices_data( metrics, - selected_market, + selected_market_sensor, query_window, resolution, forecast_horizon, @@ -474,7 +475,7 @@ def get_data_and_metrics( "event_value" ] * (1 - error_margin_lower) - unit_factor = revenue_unit_factor("MWh", selected_market.unit) + unit_factor = revenue_unit_factor("MWh", selected_market_sensor.unit) data["rev_cost"], data["rev_cost_forecast"], metrics = get_revenues_costs_data( data["power"], data["prices"], From 33baa90a87744a87cf0f4c9ce77ba50d61e0daac Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 3 Dec 2021 19:27:06 +0100 Subject: [PATCH 05/46] Clean up sensor schema (#258) Clean up the deserialization of entity addresses into Sensors and the serialization vice versa. * Refactor to remove noqa * Stop supporting serialization of old sensor models into entity addresses --- flexmeasures/api/common/schemas/sensors.py | 47 +++++----------------- 1 file changed, 10 insertions(+), 37 deletions(-) diff --git a/flexmeasures/api/common/schemas/sensors.py b/flexmeasures/api/common/schemas/sensors.py index 27b16669b..6e3d288a6 100644 --- a/flexmeasures/api/common/schemas/sensors.py +++ b/flexmeasures/api/common/schemas/sensors.py @@ -1,5 +1,3 @@ -from typing import Union - from marshmallow import fields from flexmeasures.api import FMValidationError @@ -10,9 +8,6 @@ parse_entity_address, EntityAddressException, ) -from flexmeasures.data.models.assets import Asset -from flexmeasures.data.models.markets import Market -from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.models.time_series import Sensor @@ -42,11 +37,8 @@ def __init__( self.fm_scheme = fm_scheme super().__init__(*args, **kwargs) - def _deserialize( # noqa: C901 todo: the noqa can probably be removed after refactoring Asset/Market/WeatherSensor to Sensor - self, value, attr, obj, **kwargs - ) -> Sensor: + def _deserialize(self, value, attr, obj, **kwargs) -> Sensor: """De-serialize to a Sensor.""" - # TODO: After refactoring, unify 3 generic_asset cases -> 1 sensor case try: ea = parse_entity_address(value, self.entity_type, self.fm_scheme) if self.fm_scheme == "fm0": @@ -54,47 +46,28 @@ def _deserialize( # noqa: C901 todo: the noqa can probably be removed after ref sensor = Sensor.query.filter( Sensor.id == ea["asset_id"] ).one_or_none() - if sensor is not None: - return sensor - else: - raise EntityAddressValidationError( - f"Asset with entity address {value} doesn't exist." 
- ) elif self.entity_type == "market": sensor = Sensor.query.filter( Sensor.name == ea["market_name"] ).one_or_none() - if sensor is not None: - return sensor - else: - raise EntityAddressValidationError( - f"Market with entity address {value} doesn't exist." - ) elif self.entity_type == "weather_sensor": sensor = get_sensor_by_generic_asset_type_and_location( ea["weather_sensor_type_name"], ea["latitude"], ea["longitude"] ) - if sensor is not None: - return sensor - else: - raise EntityAddressValidationError( - f"Weather sensor with entity address {value} doesn't exist." - ) + else: + return NotImplemented else: sensor = Sensor.query.filter(Sensor.id == ea["sensor_id"]).one_or_none() - if sensor is not None: - return sensor - else: - raise EntityAddressValidationError( - f"{self.entity_type} with entity address {value} doesn't exist." - ) + if sensor is not None: + return sensor + else: + raise EntityAddressValidationError( + f"{self.entity_type} with entity address {value} doesn't exist." + ) except EntityAddressException as eae: raise EntityAddressValidationError(str(eae)) - return NotImplemented - def _serialize( - self, value: Union[Sensor, Asset, Market, WeatherSensor], attr, data, **kwargs - ): + def _serialize(self, value: Sensor, attr, data, **kwargs): """Serialize to an entity address.""" if self.fm_scheme == "fm0": return value.entity_address_fm0 From db2ea57904efa690193d89b02d2e6475dee5db2b Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 6 Dec 2021 15:19:17 +0100 Subject: [PATCH 06/46] Issue 259 synchronize how we collect data from data models (#260) This PR synchronizes the function signatures of our old and new data collection methods, thereby supporting us in moving over data to our new data model. It introduces a new TimedBelief.collect method whose function signature is compatible with calls to TimedValue.collect. To ensure equivalency, some query filter utils have been refactored to become filter criteria (i.e. the SQLAlchemy term for that which you pass to a query filter), and timely-beliefs 1.8.0 supports passing additional custom criteria for its search query filters. 
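To make the synchronized signatures concrete, here is a minimal sketch (not part of the patch itself) of how a caller migrates from the tuple-based windows to the new per-bound keyword arguments; the sensor name and datetimes are illustrative, while the keyword names are taken from the hunks below:

    from datetime import datetime, timedelta

    from flexmeasures.data.models.assets import Power

    start = datetime(2015, 1, 1)
    end = datetime(2015, 1, 2)

    # Before this PR: each window was passed as a (start, end) tuple
    bdf = Power.collect(
        old_sensor_names=["wind-asset-1"],  # illustrative sensor name
        query_window=(start, end),
        belief_horizon_window=(None, timedelta(hours=0)),
    )

    # After this PR: one keyword argument per window bound, matching
    # the signature of TimedBelief.search/collect
    bdf = Power.collect(
        old_sensor_names=["wind-asset-1"],
        event_starts_after=start,
        event_ends_before=end,
        horizons_at_least=None,
        horizons_at_most=timedelta(hours=0),
    )

The same per-bound keywords can then be passed unchanged to the new TimedBelief.collect method, which is what makes the old and new data models interchangeable for callers.
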
* Revise collect function of old sensor data models: break up query_window into event_starts_after and event_ends_before * Revise collect function of old sensor data models: break up belief_time_window into beliefs_after and beliefs_before * Revise collect function of old sensor data models: break up belief_horizon_window into horizons_at_least and horizons_at_most * Copy options to filter by source type * Fix type annotations * Refactor query utils: switch over, from applying filters to a query, to building up filter criteria and then applying those criteria at once * Treat empty lists differently from None values * Use exactly the same source filter criteria in TimedBelief.search * Refactor * Refactor more * Start TimedBelief.collect * Allow passing sensor ids * Allow passing single sensor (id) * Get sensor instance from bdf (because the sensor variable might be an int) * Upgrade tb dependency * Also allow sensor names to be passed to the new collect function * TimedBelief.collect is a class method * Don't resample if resolution is None * Add the remaining optional search filters to the collect method * Add tests for the two new ways of querying beliefs * Speed up resampling in case of requiring most recent beliefs only * By default, return most recent beliefs only * Remove reference to resolved pandas issue * Add docstring to new collect method * Explain difference between passing a sensor id or a sensor name * Change type annotation for filter criteria * notin_ deprecated in favour of not_in * pip-compile --- flexmeasures/api/v1/implementations.py | 9 +- .../models/forecasting/model_spec_factory.py | 12 +- flexmeasures/data/models/planning/utils.py | 3 +- flexmeasures/data/models/time_series.py | 161 ++++++++++++++---- flexmeasures/data/queries/analytics.py | 24 ++- flexmeasures/data/queries/utils.py | 96 ++++++----- flexmeasures/data/services/resources.py | 9 +- flexmeasures/data/services/time_series.py | 2 - flexmeasures/data/tests/test_queries.py | 14 +- requirements/app.in | 2 +- requirements/app.txt | 14 +- 11 files changed, 230 insertions(+), 116 deletions(-) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index d0f16e0d2..c4569d297 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -201,10 +201,13 @@ def collect_connection_and_value_groups( # TODO: fill NaN for non-existing values power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect( old_sensor_names=sensor_names, - query_window=(start, end), + event_starts_after=start, + event_ends_before=end, resolution=resolution, - belief_horizon_window=belief_horizon_window, - belief_time_window=belief_time_window, + horizons_at_least=belief_horizon_window[0], + horizons_at_most=belief_horizon_window[1], + beliefs_after=belief_time_window[0], + beliefs_before=belief_time_window[1], user_source_ids=user_source_ids, source_types=source_types, sum_multiple=False, diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 5f751b446..60d94947f 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -180,8 +180,10 @@ def create_initial_model_specs( # noqa: C901 time_series_class=time_series_class, collect_params=dict( old_sensor_names=[sensor.name], - query_window=query_window, - belief_horizon_window=(None, ex_post_horizon), + event_starts_after=query_window[0], + 
event_ends_before=query_window[1], + horizons_at_least=None, + horizons_at_most=ex_post_horizon, ), feature_transformation=params.get("outcome_var_transformation", None), interpolation_config={"method": "time"}, @@ -296,8 +298,10 @@ def configure_regressors_for_nearest_weather_sensor( time_series_class=Weather, collect_params=dict( old_sensor_names=[closest_sensor.name], - query_window=query_window, - belief_horizon_window=(horizon, None), + event_starts_after=query_window[0], + event_ends_before=query_window[1], + horizons_at_least=horizon, + horizons_at_most=None, ), feature_transformation=regressor_transformation, interpolation_config={"method": "time"}, diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index b44616160..ed06904ff 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -84,7 +84,8 @@ def get_prices( price_bdf: tb.BeliefsDataFrame = Price.collect( sensor.name, - query_window=query_window, + event_starts_after=query_window[0], + event_ends_before=query_window[1], resolution=to_offset(resolution).freqstr, ) price_df = simplify_index(price_bdf) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 9d87f1e0d..df5bdc079 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -11,13 +11,14 @@ from flexmeasures.data.config import db from flexmeasures.data.queries.utils import ( - add_belief_timing_filter, - add_user_source_filter, - add_source_type_filter, create_beliefs_query, - exclude_source_type_filter, + get_belief_timing_criteria, + get_source_criteria, +) +from flexmeasures.data.services.time_series import ( + collect_time_series_data, + aggregate_values, ) -from flexmeasures.data.services.time_series import collect_time_series_data from flexmeasures.utils.entity_address_utils import build_entity_address from flexmeasures.data.models.charts import chart_type_to_chart_specs from flexmeasures.data.models.data_sources import DataSource @@ -301,7 +302,7 @@ def __init__( @classmethod def search( cls, - sensor: Sensor, + sensor: Union[Sensor, int], event_starts_after: Optional[datetime_type] = None, event_ends_before: Optional[datetime_type] = None, beliefs_after: Optional[datetime_type] = None, @@ -311,6 +312,9 @@ def search( source: Optional[ Union[DataSource, List[DataSource], int, List[int], str, List[str]] ] = None, + user_source_ids: Optional[Union[int, List[int]]] = None, + source_types: Optional[List[str]] = None, + exclude_source_types: Optional[List[str]] = None, most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, most_recent_only: bool = False, # deprecated @@ -325,8 +329,14 @@ def search( :param horizons_at_least: only return beliefs with a belief horizon equal or greater than this timedelta (for example, use timedelta(0) to get ante knowledge time beliefs) :param horizons_at_most: only return beliefs with a belief horizon equal or less than this timedelta (for example, use timedelta(0) to get post knowledge time beliefs) :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources + :param user_source_ids: Optional list of user source ids to query only specific user sources + :param source_types: Optional list of source type names to query only specific source types * + :param exclude_source_types: Optional list of source type names to exclude specific source types * :param 
most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + + * If user_source_ids is specified, the "user" source type is automatically included (and not excluded). + Somewhat redundant, though still allowed, is to set both source_types and exclude_source_types. """ # todo: deprecate the 'most_recent_only' argument in favor of 'most_recent_beliefs_only' (announced v0.8.0) most_recent_beliefs_only = tb_utils.replace_deprecated_argument( @@ -337,6 +347,9 @@ def search( required_argument=False, ) parsed_sources = parse_source_arg(source) + source_criteria = get_source_criteria( + cls, user_source_ids, source_types, exclude_source_types + ) return cls.search_session( session=db.session, sensor=sensor, @@ -349,8 +362,99 @@ def search( source=parsed_sources, most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, + custom_filter_criteria=source_criteria, ) + @classmethod + def collect( + cls, + sensors: Union[Sensor, int, str, List[Union[Sensor, int, str]]], + event_starts_after: Optional[datetime_type] = None, + event_ends_before: Optional[datetime_type] = None, + horizons_at_least: Optional[timedelta] = None, + horizons_at_most: Optional[timedelta] = None, + beliefs_after: Optional[datetime_type] = None, + beliefs_before: Optional[datetime_type] = None, + source: Optional[ + Union[DataSource, List[DataSource], int, List[int], str, List[str]] + ] = None, + user_source_ids: Union[ + int, List[int] + ] = None, # None is interpreted as all sources + source_types: Optional[List[str]] = None, + exclude_source_types: Optional[List[str]] = None, + most_recent_beliefs_only: bool = True, + most_recent_events_only: bool = False, + resolution: Union[str, timedelta] = None, + sum_multiple: bool = True, + ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: + """Collect beliefs about events for the given sensors. 
+ + :param sensors: search only these sensors, identified by their instance or id (both unique) or name (non-unique) + :param event_starts_after: only return beliefs about events that start after this datetime (inclusive) + :param event_ends_before: only return beliefs about events that end before this datetime (inclusive) + :param beliefs_after: only return beliefs formed after this datetime (inclusive) + :param beliefs_before: only return beliefs formed before this datetime (inclusive) + :param horizons_at_least: only return beliefs with a belief horizon equal or greater than this timedelta (for example, use timedelta(0) to get ante knowledge time beliefs) + :param horizons_at_most: only return beliefs with a belief horizon equal or less than this timedelta (for example, use timedelta(0) to get post knowledge time beliefs) + :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources + :param user_source_ids: Optional list of user source ids to query only specific user sources + :param source_types: Optional list of source type names to query only specific source types * + :param exclude_source_types: Optional list of source type names to exclude specific source types * + :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) + :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + :param resolution: Optional timedelta or pandas freqstr used to resample the results ** + :param sum_multiple: if True, sum over multiple sensors; otherwise, return a dictionary with sensor names as key, each holding a BeliefsDataFrame as its value + + * If user_source_ids is specified, the "user" source type is automatically included (and not excluded). + Somewhat redundant, though still allowed, is to set both source_types and exclude_source_types. + ** Note that timely-beliefs converts string resolutions to datetime.timedelta objects (see https://github.com/SeitaBV/timely-beliefs/issues/13). 
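As a usage sketch for this new method (the sensor identifiers and dates are made up, and a Flask app context with a set-up database is assumed): collect the most recent beliefs about two sensors as a dict of BeliefsDataFrames keyed by sensor name, resampled to 15-minute events.

    from datetime import datetime, timedelta

    import pytz

    from flexmeasures.data.models.time_series import TimedBelief

    bdf_dict = TimedBelief.collect(
        sensors=["wind-asset-1", 42],  # a (non-unique) name and a unique id
        event_starts_after=datetime(2015, 1, 1, tzinfo=pytz.utc),
        event_ends_before=datetime(2015, 1, 2, tzinfo=pytz.utc),
        resolution=timedelta(minutes=15),
        sum_multiple=False,  # don't sum over sensors; return a dict instead
    )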
+ """ + + # sanity check + assert sensors, "no sensors passed" + + # convert to list + if not isinstance(sensors, list): + sensors = [sensors] + + # convert from sensor names to sensors + sensor_names = [s for s in sensors if isinstance(s, str)] + if sensor_names: + sensors = [s for s in sensors if not isinstance(s, str)] + sensors_from_names = Sensor.query.filter( + Sensor.name.in_(sensor_names) + ).all() + sensors.extend(sensors_from_names) + + bdf_dict = {} + for sensor in sensors: + bdf = cls.search( + sensor, + event_starts_after=event_starts_after, + event_ends_before=event_ends_before, + horizons_at_least=horizons_at_least, + horizons_at_most=horizons_at_most, + beliefs_after=beliefs_after, + beliefs_before=beliefs_before, + source=source, + user_source_ids=user_source_ids, + source_types=source_types, + exclude_source_types=exclude_source_types, + most_recent_beliefs_only=most_recent_beliefs_only, + most_recent_events_only=most_recent_events_only, + ) + if resolution is not None: + bdf = bdf.resample_events( + resolution, keep_only_most_recent_belief=most_recent_beliefs_only + ) + bdf_dict[bdf.sensor.name] = bdf + + if sum_multiple: + return aggregate_values(bdf_dict) + else: + return bdf_dict + @classmethod def add( cls, @@ -463,7 +567,7 @@ def make_query( :param exclude_source_types: Optional list of source type names to exclude specific source types * * If user_source_ids is specified, the "user" source type is automatically included (and not excluded). - Somewhat redundant, but still allowed is to set both source_types and exclude_source_types. + Somewhat redundant, though still allowed, is to set both source_types and exclude_source_types. # todo: add examples @@ -473,37 +577,24 @@ def make_query( session = db.session start, end = query_window query = create_beliefs_query(cls, session, Sensor, old_sensor_names, start, end) - query = add_belief_timing_filter( - cls, query, Sensor, belief_horizon_window, belief_time_window + belief_timing_criteria = get_belief_timing_criteria( + cls, Sensor, belief_horizon_window, belief_time_window ) - if user_source_ids: - query = add_user_source_filter(cls, query, user_source_ids) - if source_types: - if user_source_ids and "user" not in source_types: - source_types.append("user") - query = add_source_type_filter(cls, query, source_types) - if exclude_source_types: - if user_source_ids and "user" in exclude_source_types: - exclude_source_types.remove("user") - query = exclude_source_type_filter(cls, query, exclude_source_types) - return query + source_criteria = get_source_criteria( + cls, user_source_ids, source_types, exclude_source_types + ) + return query.filter(*belief_timing_criteria, *source_criteria) @classmethod def collect( cls, old_sensor_names: Union[str, List[str]], - query_window: Tuple[Optional[datetime_type], Optional[datetime_type]] = ( - None, - None, - ), - belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]] = ( - None, - None, - ), - belief_time_window: Tuple[Optional[datetime_type], Optional[datetime_type]] = ( - None, - None, - ), + event_starts_after: Optional[datetime_type] = None, + event_ends_before: Optional[datetime_type] = None, + horizons_at_least: Optional[timedelta] = None, + horizons_at_most: Optional[timedelta] = None, + beliefs_after: Optional[datetime_type] = None, + beliefs_before: Optional[datetime_type] = None, user_source_ids: Union[ int, List[int] ] = None, # None is interpreted as all sources @@ -518,9 +609,9 @@ def collect( return collect_time_series_data( 
old_sensor_names=old_sensor_names, make_query=cls.make_query, - query_window=query_window, - belief_horizon_window=belief_horizon_window, - belief_time_window=belief_time_window, + query_window=(event_starts_after, event_ends_before), + belief_horizon_window=(horizons_at_least, horizons_at_most), + belief_time_window=(beliefs_after, beliefs_before), user_source_ids=user_source_ids, source_types=source_types, exclude_source_types=exclude_source_types, diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index 3bcc6f9da..b750eea02 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -178,9 +178,11 @@ def get_prices_data( # Get price data price_bdf: tb.BeliefsDataFrame = Price.collect( [market_name], - query_window=query_window, + event_starts_after=query_window[0], + event_ends_before=query_window[1], resolution=resolution, - belief_horizon_window=(None, timedelta(hours=0)), + horizons_at_least=None, + horizons_at_most=timedelta(hours=0), ) price_df: pd.DataFrame = simplify_index( price_bdf, index_levels_to_columns=["belief_horizon", "source"] @@ -194,9 +196,11 @@ def get_prices_data( # Get price forecast price_forecast_bdf: tb.BeliefsDataFrame = Price.collect( [market_name], - query_window=query_window, + event_starts_after=query_window[0], + event_ends_before=query_window[1], resolution=resolution, - belief_horizon_window=(forecast_horizon, None), + horizons_at_least=forecast_horizon, + horizons_at_most=None, source_types=["user", "forecasting script", "script"], ) price_forecast_df: pd.DataFrame = simplify_index( @@ -262,9 +266,11 @@ def get_weather_data( # Get weather data weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect( sensor_names, - query_window=query_window, + event_starts_after=query_window[0], + event_ends_before=query_window[1], resolution=resolution, - belief_horizon_window=(None, timedelta(hours=0)), + horizons_at_least=None, + horizons_at_most=timedelta(hours=0), sum_multiple=False, ) weather_df_dict: Dict[str, pd.DataFrame] = {} @@ -277,9 +283,11 @@ def get_weather_data( # Get weather forecasts weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect( sensor_names, - query_window=query_window, + event_starts_after=query_window[0], + event_ends_before=query_window[1], resolution=resolution, - belief_horizon_window=(forecast_horizon, None), + horizons_at_least=forecast_horizon, + horizons_at_most=None, source_types=["user", "forecasting script", "script"], sum_multiple=False, ) diff --git a/flexmeasures/data/queries/utils.py b/flexmeasures/data/queries/utils.py index 211752045..f1382c0bd 100644 --- a/flexmeasures/data/queries/utils.py +++ b/flexmeasures/data/queries/utils.py @@ -1,10 +1,11 @@ -from typing import List, Optional, Tuple, Union +from typing import List, Optional, Type, Tuple, Union from datetime import datetime, timedelta import pandas as pd import timely_beliefs as tb from sqlalchemy.orm import Query, Session +from sqlalchemy.sql.elements import BinaryExpression from flexmeasures.data.config import db from flexmeasures.data.models.data_sources import DataSource @@ -13,7 +14,7 @@ def create_beliefs_query( - cls: "ts.TimedValue", + cls: "Type[ts.TimedValue]", session: Session, old_sensor_class: db.Model, old_sensor_names: Tuple[str], @@ -36,54 +37,68 @@ def create_beliefs_query( return query -def add_user_source_filter( - cls: "ts.TimedValue", query: Query, user_source_ids: Union[int, List[int]] -) -> Query: - """Add filter to the query to 
search only through user data from the specified user sources. +def get_source_criteria( + cls: "Type[ts.TimedValue]", + user_source_ids: Union[int, List[int]], + source_types: List[str], + exclude_source_types: List[str], +) -> List[BinaryExpression]: + source_criteria: List[BinaryExpression] = [] + if user_source_ids is not None: + source_criteria.append(user_source_criterion(cls, user_source_ids)) + if source_types is not None: + if user_source_ids and "user" not in source_types: + source_types.append("user") + source_criteria.append(source_type_criterion(source_types)) + if exclude_source_types is not None: + if user_source_ids and "user" in exclude_source_types: + exclude_source_types.remove("user") + source_criteria.append(source_type_exclusion_criterion(exclude_source_types)) + return source_criteria + + +def user_source_criterion( + cls: "Type[ts.TimedValue]", + user_source_ids: Union[int, List[int]], +) -> BinaryExpression: + """Criterion to search only through user data from the specified user sources. We distinguish user sources (sources with source.type == "user") from other sources (source.type != "user"). Data with a user source originates from a registered user. Data with e.g. a script source originates from a script. - This filter doesn't affect the query over non-user type sources. + This criterion doesn't affect the query over non-user type sources. It does so by ignoring user sources that are not in the given list of source_ids. """ if user_source_ids is not None and not isinstance(user_source_ids, list): user_source_ids = [user_source_ids] # ensure user_source_ids is a list - if user_source_ids: - ignorable_user_sources = ( - DataSource.query.filter(DataSource.type == "user") - .filter(DataSource.id.notin_(user_source_ids)) - .all() - ) - ignorable_user_source_ids = [ - user_source.id for user_source in ignorable_user_sources - ] - query = query.filter(cls.data_source_id.notin_(ignorable_user_source_ids)) - return query + ignorable_user_sources = ( + DataSource.query.filter(DataSource.type == "user") + .filter(DataSource.id.not_in(user_source_ids)) + .all() + ) + ignorable_user_source_ids = [ + user_source.id for user_source in ignorable_user_sources + ] + return cls.data_source_id.not_in(ignorable_user_source_ids) -def add_source_type_filter( - cls: "ts.TimedValue", query: Query, source_types: List[str] -) -> Query: - """Add filter to the query to collect only data from sources that are of the given type.""" - return query.filter(DataSource.type.in_(source_types)) if source_types else query +def source_type_criterion(source_types: List[str]) -> BinaryExpression: + """Criterion to collect only data from sources that are of the given type.""" + return DataSource.type.in_(source_types) -def exclude_source_type_filter( - cls: "ts.TimedValue", query: Query, source_types: List[str] -) -> Query: - """Add filter to the query to exclude sources that are of the given type.""" - return query.filter(DataSource.type.notin_(source_types)) if source_types else query +def source_type_exclusion_criterion(source_types: List[str]) -> BinaryExpression: + """Criterion to exclude sources that are of the given type.""" + return DataSource.type.not_in(source_types) -def add_belief_timing_filter( - cls: "ts.TimedValue", - query: Query, +def get_belief_timing_criteria( + cls: "Type[ts.TimedValue]", asset_class: db.Model, belief_horizon_window: Tuple[Optional[timedelta], Optional[timedelta]], belief_time_window: Tuple[Optional[datetime], Optional[datetime]], -) -> Query: - """Add filters for the 
desired windows with relevant belief times and belief horizons. +) -> List[BinaryExpression]: + """Get filter criteria for the desired windows with relevant belief times and belief horizons. # todo: interpret belief horizons with respect to knowledge time rather than event end. - a positive horizon denotes a before-the-fact belief (ex-ante w.r.t. knowledge time) @@ -121,24 +136,25 @@ def add_belief_timing_filter( belief_time_window = (None, datetime(2020, 5, 13)) """ + criteria: List[BinaryExpression] = [] earliest_belief_time, latest_belief_time = belief_time_window if ( earliest_belief_time is not None and latest_belief_time is not None and earliest_belief_time == latest_belief_time ): # search directly for a unique belief time - query = query.filter( + criteria.append( cls.datetime + asset_class.event_resolution - cls.horizon == earliest_belief_time ) else: if earliest_belief_time is not None: - query = query.filter( + criteria.append( cls.datetime + asset_class.event_resolution - cls.horizon >= earliest_belief_time ) if latest_belief_time is not None: - query = query.filter( + criteria.append( cls.datetime + asset_class.event_resolution - cls.horizon <= latest_belief_time ) @@ -148,13 +164,13 @@ def add_belief_timing_filter( and long_horizon is not None and short_horizon == long_horizon ): # search directly for a unique belief horizon - query = query.filter(cls.horizon == short_horizon) + criteria.append(cls.horizon == short_horizon) else: if short_horizon is not None: - query = query.filter(cls.horizon >= short_horizon) + criteria.append(cls.horizon >= short_horizon) if long_horizon is not None: - query = query.filter(cls.horizon <= long_horizon) - return query + criteria.append(cls.horizon <= long_horizon) + return criteria def simplify_index( diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index d5c6662af..8d536a1ac 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -479,9 +479,12 @@ def load_sensor_data( # Query the sensors resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.collect( old_sensor_names=list(names_of_resource_sensors), - query_window=(start, end), - belief_horizon_window=belief_horizon_window, - belief_time_window=belief_time_window, + event_starts_after=start, + event_ends_before=end, + horizons_at_least=belief_horizon_window[0], + horizons_at_most=belief_horizon_window[1], + beliefs_after=belief_time_window[0], + beliefs_before=belief_time_window[1], source_types=source_types, exclude_source_types=exclude_source_types, resolution=resolution, diff --git a/flexmeasures/data/services/time_series.py b/flexmeasures/data/services/time_series.py index bfab31c70..5765b9525 100644 --- a/flexmeasures/data/services/time_series.py +++ b/flexmeasures/data/services/time_series.py @@ -112,8 +112,6 @@ def query_time_series_data( with each BeliefsDataFrame having an "event_value" column. * Note that we convert string resolutions to datetime.timedelta objects. 
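As a small aside on that conversion, pandas can move between the two representations (to_offset is also what the planning code uses to produce a freqstr):

    from datetime import timedelta

    import pandas as pd
    from pandas.tseries.frequencies import to_offset

    to_offset(timedelta(minutes=15)).freqstr  # '15T'
    pd.Timedelta(to_offset("15T"))            # Timedelta('0 days 00:15:00')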
- Pandas can resample with those, but still has some quirky behaviour with DST: - see https://github.com/pandas-dev/pandas/issues/35219 """ # On demo, we query older data as if it's the current year's data (we convert back below) diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 687e1cd43..7a7307a14 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -43,9 +43,7 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() data = Power.query.filter(Power.sensor_id == wind_device_1.id).all() print(data) - bdf: tb.BeliefsDataFrame = Power.collect( - wind_device_1.name, (query_start, query_end) - ) + bdf: tb.BeliefsDataFrame = Power.collect(wind_device_1.name, query_start, query_end) print(bdf) assert ( bdf.index.names[0] == "event_start" @@ -92,7 +90,7 @@ def test_collect_power_resampled( ): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() bdf: tb.BeliefsDataFrame = Power.collect( - wind_device_1.name, (query_start, query_end), resolution=resolution + wind_device_1.name, query_start, query_end, resolution=resolution ) print(bdf) assert len(bdf) == num_values @@ -210,10 +208,8 @@ def test_simplify_index(setup_test_data, check_empty_frame): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() bdf: tb.BeliefsDataFrame = Power.collect( wind_device_1.name, - ( - datetime(2015, 1, 1, tzinfo=pytz.utc), - datetime(2015, 1, 2, tzinfo=pytz.utc), - ), + datetime(2015, 1, 1, tzinfo=pytz.utc), + datetime(2015, 1, 2, tzinfo=pytz.utc), resolution=timedelta(minutes=15), ) if check_empty_frame: @@ -229,6 +225,8 @@ def test_query_beliefs(setup_beliefs): source = DataSource.query.filter_by(name="Seita").one_or_none() bdfs = [ TimedBelief.search(sensor, source=source), + TimedBelief.search(sensor.id, source=source), + TimedBelief.collect(sensor.name, source=source), sensor.search_beliefs(source=source), tb.BeliefsDataFrame(sensor.beliefs), # doesn't allow filtering ] diff --git a/requirements/app.in b/requirements/app.in index 9692c7efc..8b791b450 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -32,7 +32,7 @@ netCDF4 siphon tables timetomodel>=0.7.1 -timely-beliefs>=1.7.0 +timely-beliefs>=1.8.0 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index f63ec5cc7..0bb93b3de 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -21,10 +21,6 @@ attrs==21.2.0 # jsonschema # outcome # trio -backports.zoneinfo==0.2.1 - # via - # pytz-deprecation-shim - # tzlocal bcrypt==3.2.0 # via -r requirements/app.in beautifulsoup4==4.10.0 @@ -140,9 +136,7 @@ idna==3.3 importlib-metadata==4.8.1 # via # -r requirements/app.in - # alembic -importlib-resources==5.4.0 - # via alembic + # timely-beliefs inflect==5.3.0 # via -r requirements/app.in inflection==0.5.1 @@ -353,7 +347,7 @@ tables==3.6.1 # via -r requirements/app.in threadpoolctl==3.0.0 # via scikit-learn -timely-beliefs==1.7.0 +timely-beliefs==1.8.0 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in @@ -389,9 +383,7 @@ wtforms==2.3.3 xlrd==2.0.1 # via -r requirements/app.in zipp==3.6.0 - # via - # importlib-metadata - # importlib-resources + # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools From b42ef1a2b201e7fa9a12e5036b1eaa5d056a04ec Mon Sep 17 
00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 6 Dec 2021 16:59:33 +0100 Subject: [PATCH 07/46] Merge search and collect (#262) Merging the new TimedBelief.collect method with the existing TimedBelief.search method. This also entails renaming all occurrences of TimedValue.collect to become TimedValue.search, in order to safeguard the goal of PR #260. * Rename old collect method to search * Merge new collect method into TimedBelief.search * Deprecate the sensor argument in favor of sensors * Small annotation revert in scope of project 9 * Fix deprecation of required argument --- flexmeasures/api/v1/implementations.py | 2 +- .../models/forecasting/model_spec_factory.py | 34 ++--- flexmeasures/data/models/planning/utils.py | 2 +- flexmeasures/data/models/time_series.py | 117 +++++------------- flexmeasures/data/queries/analytics.py | 8 +- flexmeasures/data/services/resources.py | 2 +- flexmeasures/data/tests/test_queries.py | 8 +- 7 files changed, 61 insertions(+), 112 deletions(-) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index c4569d297..8399ef324 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -199,7 +199,7 @@ def collect_connection_and_value_groups( # Get the power values # TODO: fill NaN for non-existing values - power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.collect( + power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.search( old_sensor_names=sensor_names, event_starts_after=start, event_ends_before=end, diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 60d94947f..7fceb9463 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -43,20 +43,20 @@ class TBSeriesSpecs(SeriesSpecs): """Compatibility for using timetomodel.SeriesSpecs with timely_beliefs.BeliefsDataFrames. - This implements _load_series such that .collect is called, - with the parameters in collect_params. - The collect function is expected to return a BeliefsDataFrame. + This implements _load_series such that .search is called, + with the parameters in search_params. + The search function is expected to return a BeliefsDataFrame.
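After this rename, specs are configured along these lines (a sketch based on the call sites in this diff; the name and sensor are made up, and query_window is assumed to be a (start, end) tuple of datetimes):

    specs = TBSeriesSpecs(
        name="my-outcome-var",
        time_series_class=Power,
        search_params=dict(
            old_sensor_names=["wind-asset-1"],
            event_starts_after=query_window[0],
            event_ends_before=query_window[1],
        ),
        search_fnc="search",  # the default; set a custom method name if needed
    )
    # _load_series then calls: getattr(Power, "search")(**specs.search_params)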
""" - time_series_class: Any # with method (named "collect" by default) - collect_params: dict + time_series_class: Any # with method (named "search" by default) + search_params: dict def __init__( self, time_series_class, - collect_params: dict, + search_params: dict, name: str, - collect_fnc: str = "collect", + search_fnc: str = "search", original_tz: Optional[tzinfo] = pytz.utc, # postgres stores naive datetimes feature_transformation: Optional[ReversibleTransformation] = None, post_load_processing: Optional[Transformation] = None, @@ -72,14 +72,14 @@ def __init__( interpolation_config, ) self.time_series_class = time_series_class - self.collect_params = collect_params - self.collect_fnc = collect_fnc + self.search_params = search_params + self.search_fnc = search_fnc def _load_series(self) -> pd.Series: logger.info("Reading %s data from database" % self.time_series_class.__name__) - bdf: BeliefsDataFrame = getattr(self.time_series_class, self.collect_fnc)( - **self.collect_params + bdf: BeliefsDataFrame = getattr(self.time_series_class, self.search_fnc)( + **self.search_params ) assert isinstance(bdf, BeliefsDataFrame) df = simplify_index(bdf) @@ -96,19 +96,19 @@ def check_data(self, df: pd.DataFrame): if df.empty: raise MissingData( "No values found in database for the requested %s data. It's no use to continue I'm afraid." - " Here's a print-out of what I tried to collect:\n\n%s\n\n" + " Here's a print-out of what I tried to search for:\n\n%s\n\n" % ( self.time_series_class.__name__, - pformat(self.collect_params, sort_dicts=False), + pformat(self.search_params, sort_dicts=False), ) ) if df.isnull().values.any(): raise NaNData( "Nan values found in database for the requested %s data. It's no use to continue I'm afraid." - " Here's a print-out of what I tried to collect:\n\n%s\n\n" + " Here's a print-out of what I tried to search for:\n\n%s\n\n" % ( self.time_series_class.__name__, - pformat(self.collect_params, sort_dicts=False), + pformat(self.search_params, sort_dicts=False), ) ) @@ -178,7 +178,7 @@ def create_initial_model_specs( # noqa: C901 outcome_var_spec = TBSeriesSpecs( name=sensor.generic_asset.generic_asset_type.name, time_series_class=time_series_class, - collect_params=dict( + search_params=dict( old_sensor_names=[sensor.name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -296,7 +296,7 @@ def configure_regressors_for_nearest_weather_sensor( TBSeriesSpecs( name=regressor_specs_name, time_series_class=Weather, - collect_params=dict( + search_params=dict( old_sensor_names=[closest_sensor.name], event_starts_after=query_window[0], event_ends_before=query_window[1], diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index ed06904ff..c35aa8a86 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -82,7 +82,7 @@ def get_prices( # Look for the applicable market sensor sensor = get_market(sensor) - price_bdf: tb.BeliefsDataFrame = Price.collect( + price_bdf: tb.BeliefsDataFrame = Price.search( sensor.name, event_starts_after=query_window[0], event_ends_before=query_window[1], diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index df5bdc079..fad75a2ba 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -172,7 +172,7 @@ def search_beliefs( required_argument=False, ) bdf = TimedBelief.search( - sensor=self, + sensors=self, 
event_starts_after=event_starts_after, event_ends_before=event_ends_before, beliefs_after=beliefs_after, @@ -302,7 +302,8 @@ def __init__( @classmethod def search( cls, - sensor: Union[Sensor, int], + sensors: Union[Sensor, int, str, List[Union[Sensor, int, str]]], + sensor: Sensor = None, # deprecated event_starts_after: Optional[datetime_type] = None, event_ends_before: Optional[datetime_type] = None, beliefs_after: Optional[datetime_type] = None, @@ -318,77 +319,10 @@ def search( most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, most_recent_only: bool = False, # deprecated - ) -> tb.BeliefsDataFrame: - """Search all beliefs about events for a given sensor. - - :param sensor: search only this sensor - :param event_starts_after: only return beliefs about events that start after this datetime (inclusive) - :param event_ends_before: only return beliefs about events that end before this datetime (inclusive) - :param beliefs_after: only return beliefs formed after this datetime (inclusive) - :param beliefs_before: only return beliefs formed before this datetime (inclusive) - :param horizons_at_least: only return beliefs with a belief horizon equal or greater than this timedelta (for example, use timedelta(0) to get ante knowledge time beliefs) - :param horizons_at_most: only return beliefs with a belief horizon equal or less than this timedelta (for example, use timedelta(0) to get post knowledge time beliefs) - :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources - :param user_source_ids: Optional list of user source ids to query only specific user sources - :param source_types: Optional list of source type names to query only specific source types * - :param exclude_source_types: Optional list of source type names to exclude specific source types * - :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) - :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) - - * If user_source_ids is specified, the "user" source type is automatically included (and not excluded). - Somewhat redundant, though still allowed, is to set both source_types and exclude_source_types. 
- """ - # todo: deprecate the 'most_recent_only' argument in favor of 'most_recent_beliefs_only' (announced v0.8.0) - most_recent_beliefs_only = tb_utils.replace_deprecated_argument( - "most_recent_only", - most_recent_only, - "most_recent_beliefs_only", - most_recent_beliefs_only, - required_argument=False, - ) - parsed_sources = parse_source_arg(source) - source_criteria = get_source_criteria( - cls, user_source_ids, source_types, exclude_source_types - ) - return cls.search_session( - session=db.session, - sensor=sensor, - event_starts_after=event_starts_after, - event_ends_before=event_ends_before, - beliefs_after=beliefs_after, - beliefs_before=beliefs_before, - horizons_at_least=horizons_at_least, - horizons_at_most=horizons_at_most, - source=parsed_sources, - most_recent_beliefs_only=most_recent_beliefs_only, - most_recent_events_only=most_recent_events_only, - custom_filter_criteria=source_criteria, - ) - - @classmethod - def collect( - cls, - sensors: Union[Sensor, int, str, List[Union[Sensor, int, str]]], - event_starts_after: Optional[datetime_type] = None, - event_ends_before: Optional[datetime_type] = None, - horizons_at_least: Optional[timedelta] = None, - horizons_at_most: Optional[timedelta] = None, - beliefs_after: Optional[datetime_type] = None, - beliefs_before: Optional[datetime_type] = None, - source: Optional[ - Union[DataSource, List[DataSource], int, List[int], str, List[str]] - ] = None, - user_source_ids: Union[ - int, List[int] - ] = None, # None is interpreted as all sources - source_types: Optional[List[str]] = None, - exclude_source_types: Optional[List[str]] = None, - most_recent_beliefs_only: bool = True, - most_recent_events_only: bool = False, resolution: Union[str, timedelta] = None, sum_multiple: bool = True, ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: - """Collect beliefs about events for the given sensors. + """Search all beliefs about events for the given sensors. :param sensors: search only these sensors, identified by their instance or id (both unique) or name (non-unique) :param event_starts_after: only return beliefs about events that start after this datetime (inclusive) @@ -410,13 +344,24 @@ def collect( Somewhat redundant, though still allowed, is to set both source_types and exclude_source_types. ** Note that timely-beliefs converts string resolutions to datetime.timedelta objects (see https://github.com/SeitaBV/timely-beliefs/issues/13). 
""" - - # sanity check - assert sensors, "no sensors passed" + # todo: deprecate the 'sensor' argument in favor of 'sensors' (announced v0.8.0) + sensors = tb_utils.replace_deprecated_argument( + "sensor", + sensor, + "sensors", + sensors, + ) + # todo: deprecate the 'most_recent_only' argument in favor of 'most_recent_beliefs_only' (announced v0.8.0) + most_recent_beliefs_only = tb_utils.replace_deprecated_argument( + "most_recent_only", + most_recent_only, + "most_recent_beliefs_only", + most_recent_beliefs_only, + required_argument=False, + ) # convert to list - if not isinstance(sensors, list): - sensors = [sensors] + sensors = [sensors] if not isinstance(sensors, list) else sensors # convert from sensor names to sensors sensor_names = [s for s in sensors if isinstance(s, str)] @@ -427,22 +372,26 @@ def collect( ).all() sensors.extend(sensors_from_names) + parsed_sources = parse_source_arg(source) + source_criteria = get_source_criteria( + cls, user_source_ids, source_types, exclude_source_types + ) + bdf_dict = {} for sensor in sensors: - bdf = cls.search( - sensor, + bdf = cls.search_session( + session=db.session, + sensor=sensor, event_starts_after=event_starts_after, event_ends_before=event_ends_before, - horizons_at_least=horizons_at_least, - horizons_at_most=horizons_at_most, beliefs_after=beliefs_after, beliefs_before=beliefs_before, - source=source, - user_source_ids=user_source_ids, - source_types=source_types, - exclude_source_types=exclude_source_types, + horizons_at_least=horizons_at_least, + horizons_at_most=horizons_at_most, + source=parsed_sources, most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, + custom_filter_criteria=source_criteria, ) if resolution is not None: bdf = bdf.resample_events( @@ -586,7 +535,7 @@ def make_query( return query.filter(*belief_timing_criteria, *source_criteria) @classmethod - def collect( + def search( cls, old_sensor_names: Union[str, List[str]], event_starts_after: Optional[datetime_type] = None, diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index b750eea02..08b364456 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -176,7 +176,7 @@ def get_prices_data( market_name = "" if market_sensor is None else market_sensor.name # Get price data - price_bdf: tb.BeliefsDataFrame = Price.collect( + price_bdf: tb.BeliefsDataFrame = Price.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -194,7 +194,7 @@ def get_prices_data( metrics["realised_unit_price"] = np.NaN # Get price forecast - price_forecast_bdf: tb.BeliefsDataFrame = Price.collect( + price_forecast_bdf: tb.BeliefsDataFrame = Price.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -264,7 +264,7 @@ def get_weather_data( sensor_names = [sensor.name for sensor in closest_sensors] # Get weather data - weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect( + weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( sensor_names, event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -281,7 +281,7 @@ def get_weather_data( ) # Get weather forecasts - weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.collect( + weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( sensor_names, event_starts_after=query_window[0], event_ends_before=query_window[1], diff --git 
a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index 8d536a1ac..bc34a8db9 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -477,7 +477,7 @@ def load_sensor_data( ) # Query the sensors - resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.collect( + resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.search( old_sensor_names=list(names_of_resource_sensors), event_starts_after=start, event_ends_before=end, diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 7a7307a14..2f70a5c8b 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -43,7 +43,7 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() data = Power.query.filter(Power.sensor_id == wind_device_1.id).all() print(data) - bdf: tb.BeliefsDataFrame = Power.collect(wind_device_1.name, query_start, query_end) + bdf: tb.BeliefsDataFrame = Power.search(wind_device_1.name, query_start, query_end) print(bdf) assert ( bdf.index.names[0] == "event_start" @@ -89,7 +89,7 @@ def test_collect_power_resampled( db, app, query_start, query_end, resolution, num_values, setup_test_data ): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.collect( + bdf: tb.BeliefsDataFrame = Power.search( wind_device_1.name, query_start, query_end, resolution=resolution ) print(bdf) @@ -206,7 +206,7 @@ def test_multiplication_with_both_empty_dataframe(): def test_simplify_index(setup_test_data, check_empty_frame): """Check whether simplify_index retains the event resolution.""" wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.collect( + bdf: tb.BeliefsDataFrame = Power.search( wind_device_1.name, datetime(2015, 1, 1, tzinfo=pytz.utc), datetime(2015, 1, 2, tzinfo=pytz.utc), @@ -226,7 +226,7 @@ def test_query_beliefs(setup_beliefs): bdfs = [ TimedBelief.search(sensor, source=source), TimedBelief.search(sensor.id, source=source), - TimedBelief.collect(sensor.name, source=source), + TimedBelief.search(sensor.name, source=source), sensor.search_beliefs(source=source), tb.BeliefsDataFrame(sensor.beliefs), # doesn't allow filtering ] From cb5892047bfd413ce10251e9af615d8821d20716 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Tue, 7 Dec 2021 10:47:59 +0100 Subject: [PATCH 08/46] Issue 261 add changelog documentation for project 9 (#264) * Changelog entry for project 9 * Add upgrade warning * Add asset CRUD warning --- documentation/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 0207402d5..6124fec02 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -5,6 +5,9 @@ FlexMeasures Changelog v0.8.0 | November XX, 2021 =========================== +.. warning:: Upgrading to this version requires running ``flexmeasures db upgrade`` (you can create a backup first with ``flexmeasures db-ops dump``). +.. warning:: Changes to asset attributes made via the UI or API are not reflected in the new data model until `issue #247 `_ is resolved. + New features ----------- * Charts with sensor data can be requested in one of the supported [`vega-lite themes `_] (incl. 
a dark theme) [see `PR #221 `_] @@ -19,6 +22,7 @@ Infrastructure / Support * Improve data specification for forecasting models using timely-beliefs data [see `PR #154 `_] * Allow plugins to register their custom config settings, so that FlexMeasures can check whether they are set up correctly [see `PR #230 `_ and `PR #237 `_] * Added sensor method to obtain just its latest state (excl. forecasts) [see `PR #235 `_] +* Migrate attributes of assets, markets and weather sensors to our new sensor model [see `PR #254 `_ and `project 9 `_] v0.7.1 | November 08, 2021 From 84c959f358134c276ecb77f5c69fb93176afebf7 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Thu, 9 Dec 2021 10:37:14 +0100 Subject: [PATCH 09/46] Fall back scheduler heuristics (#267) Introduce a fallback policy for charging schedules of batteries and Charge Points, in cases where the solver is presented with an infeasible problem. * Implement and test fallback policy for infeasible scheduler results Signed-off-by: F.N. Claessen * Reform old Asset properties into Sensor properties Signed-off-by: F.N. Claessen * Simplify Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/implementations.py | 4 +- .../api/v2_0/implementations/sensors.py | 4 +- flexmeasures/data/models/planning/battery.py | 11 +++- .../data/models/planning/charging_station.py | 16 +++-- flexmeasures/data/models/planning/solver.py | 11 ++-- .../data/models/planning/tests/test_solver.py | 62 ++++++++++++++++++- flexmeasures/data/models/planning/utils.py | 61 ++++++++++++++++++ flexmeasures/data/models/time_series.py | 18 +++++- 8 files changed, 171 insertions(+), 16 deletions(-) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 8399ef324..d8ebe5f43 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -274,7 +274,7 @@ def create_connection_and_value_groups( # noqa: C901 return unrecognized_connection_group() # Validate the sign of the values (following USEF specs with positive consumption and negative production) - if sensor.get_attribute("is_pure_consumer") and any( + if sensor.get_attribute("is_strictly_non_positive") and any( v < 0 for v in value_group ): extra_info = ( @@ -282,7 +282,7 @@ def create_connection_and_value_groups( # noqa: C901 % sensor.entity_address ) return power_value_too_small(extra_info) - elif sensor.get_attribute("is_pure_producer") and any( + elif sensor.get_attribute("is_strictly_non_negative") and any( v > 0 for v in value_group ): extra_info = ( diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index 03e3cb29d..7ff25aa36 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -328,7 +328,7 @@ def post_power_data( return unrecognized_connection_group() # Validate the sign of the values (following USEF specs with positive consumption and negative production) - if sensor.get_attribute("is_pure_consumer") and any( + if sensor.get_attribute("is_strictly_non_positive") and any( v < 0 for v in event_values ): extra_info = ( @@ -336,7 +336,7 @@ def post_power_data( % sensor.entity_address ) return power_value_too_small(extra_info) - elif sensor.get_attribute("is_pure_producer") and any( + elif sensor.get_attribute("is_strictly_non_negative") and any( v > 0 for v in event_values ): extra_info = ( diff --git a/flexmeasures/data/models/planning/battery.py 
b/flexmeasures/data/models/planning/battery.py index 265f90058..98d95ba0c 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -10,6 +10,7 @@ initialize_series, add_tiny_price_slope, get_prices, + fallback_charging_policy, ) @@ -93,13 +94,19 @@ def schedule_battery( columns = ["derivative max", "derivative min"] ems_constraints = initialize_df(columns, start, end, resolution) - ems_schedule, expected_costs = device_scheduler( + ems_schedule, expected_costs, scheduler_results = device_scheduler( device_constraints, ems_constraints, commitment_quantities, commitment_downwards_deviation_price, commitment_upwards_deviation_price, ) - battery_schedule = ems_schedule[0] + if scheduler_results.solver.termination_condition == "infeasible": + # Fallback policy if the problem was unsolvable + battery_schedule = fallback_charging_policy( + sensor, device_constraints[0], start, end, resolution + ) + else: + battery_schedule = ems_schedule[0] return battery_schedule diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 93b2f3c92..279fd9b71 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -10,6 +10,7 @@ initialize_series, add_tiny_price_slope, get_prices, + fallback_charging_policy, ) @@ -82,13 +83,14 @@ def schedule_charging_station( ) - soc_at_start * ( timedelta(hours=1) / resolution ) # Lacking information about the battery's nominal capacity, we use the highest target value as the maximum state of charge - if sensor.get_attribute("is_pure_consumer", False): + + if sensor.get_attribute("is_strictly_non_positive"): device_constraints[0]["derivative min"] = 0 else: device_constraints[0]["derivative min"] = ( sensor.get_attribute("capacity_in_mw") * -1 ) - if sensor.get_attribute("is_pure_producer", False): + if sensor.get_attribute("is_strictly_non_negative"): device_constraints[0]["derivative max"] = 0 else: device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") @@ -97,13 +99,19 @@ def schedule_charging_station( columns = ["derivative max", "derivative min"] ems_constraints = initialize_df(columns, start, end, resolution) - ems_schedule, expected_costs = device_scheduler( + ems_schedule, expected_costs, scheduler_results = device_scheduler( device_constraints, ems_constraints, commitment_quantities, commitment_downwards_deviation_price, commitment_upwards_deviation_price, ) - charging_station_schedule = ems_schedule[0] + if scheduler_results.solver.termination_condition == "infeasible": + # Fallback policy if the problem was unsolvable + charging_station_schedule = fallback_charging_policy( + sensor, device_constraints[0], start, end, resolution + ) + else: + charging_station_schedule = ems_schedule[0] return charging_station_schedule diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index f7648ef3e..78341b5e8 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -16,7 +16,7 @@ ) from pyomo.environ import UnknownSolver # noqa F401 from pyomo.environ import value -from pyomo.opt import SolverFactory +from pyomo.opt import SolverFactory, SolverResults from flexmeasures.data.models.planning.utils import initialize_series @@ -29,7 +29,7 @@ def device_scheduler( # noqa C901 commitment_quantities: List[pd.Series], commitment_downwards_deviation_price: 
Union[List[pd.Series], List[float]], commitment_upwards_deviation_price: Union[List[pd.Series], List[float]], -) -> Tuple[List[pd.Series], float]: +) -> Tuple[List[pd.Series], float, SolverResults]: """Schedule devices given constraints on a device and EMS level, and given a list of commitments by the EMS. The commitments are assumed to be with regards to the flow of energy to the device (positive for consumption, negative for production). The solver minimises the costs of deviating from the commitments. @@ -223,7 +223,9 @@ def cost_function(m): model.costs = Objective(rule=cost_function, sense=minimize) # Solve - SolverFactory(current_app.config.get("FLEXMEASURES_LP_SOLVER")).solve(model) + results = SolverFactory(current_app.config.get("FLEXMEASURES_LP_SOLVER")).solve( + model + ) planned_costs = value(model.costs) planned_power_per_device = [] @@ -239,6 +241,7 @@ def cost_function(m): ) # model.pprint() + # print(results.solver.termination_condition) # print(planned_costs) # input() - return planned_power_per_device, planned_costs + return planned_power_per_device, planned_costs, results diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 9daef80db..10562f14d 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -78,7 +78,8 @@ def test_battery_solver_day_2(add_battery_assets): def test_charging_station_solver_day_2(target_soc, charging_station_name): """Starting with a state of charge 1 kWh, within 2 hours we should be able to reach any state of charge in the range [1, 5] kWh for a unidirectional station, - or [0, 5] for a bidirectional station.""" + or [0, 5] for a bidirectional station, given a charging capacity of 2 kW. + """ soc_at_start = 1 duration_until_target = timedelta(hours=2) @@ -86,6 +87,7 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): charging_station = Sensor.query.filter( Sensor.name == charging_station_name ).one_or_none() + assert charging_station.get_attribute("capacity_in_mw") == 2 assert Sensor.query.get(charging_station.get_attribute("market_id")) == epex_da start = as_server_time(datetime(2015, 1, 2)) end = as_server_time(datetime(2015, 1, 3)) @@ -113,3 +115,61 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): print(consumption_schedule.head(12)) print(soc_schedule.head(12)) assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < 0.00001 + + +@pytest.mark.parametrize( + "target_soc, charging_station_name", + [ + (9, "Test charging station"), + (15, "Test charging station"), + (5, "Test charging station (bidirectional)"), + (15, "Test charging station (bidirectional)"), + ], +) +def test_fallback_to_unsolvable_problem(target_soc, charging_station_name): + """Starting with a state of charge 10 kWh, within 2 hours we should be able to reach + any state of charge in the range [10, 14] kWh for a unidirectional station, + or [6, 14] for a bidirectional station, given a charging capacity of 2 kW. + Here we test target states of charge outside that range, ones that we should be able + to get as close to as 1 kWh difference. + We want our scheduler to handle unsolvable problems like these with a sensible fallback policy. 
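To make the expected gap concrete, a quick back-of-the-envelope check using the numbers from this test:

    soc_at_start = 10       # kWh
    capacity = 2            # the test's charging capacity (stored under "capacity_in_mw")
    hours_until_target = 2
    max_reachable_soc = soc_at_start + capacity * hours_until_target  # 14 kWh
    expected_gap = 15 - max_reachable_soc  # a 15 kWh target is missed by 1 kWh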
+ """ + soc_at_start = 10 + duration_until_target = timedelta(hours=2) + expected_gap = 1 + + epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() + charging_station = Sensor.query.filter( + Sensor.name == charging_station_name + ).one_or_none() + assert charging_station.get_attribute("capacity_in_mw") == 2 + assert Sensor.query.get(charging_station.get_attribute("market_id")) == epex_da + start = as_server_time(datetime(2015, 1, 2)) + end = as_server_time(datetime(2015, 1, 3)) + resolution = timedelta(minutes=15) + target_soc_datetime = start + duration_until_target + soc_targets = pd.Series( + np.nan, index=pd.date_range(start, end, freq=resolution, closed="right") + ) + soc_targets.loc[target_soc_datetime] = target_soc + consumption_schedule = schedule_charging_station( + charging_station, start, end, resolution, soc_at_start, soc_targets + ) + soc_schedule = integrate_time_series( + consumption_schedule, soc_at_start, decimal_precision=6 + ) + + # Check if constraints were met + assert ( + min(consumption_schedule.values) + >= charging_station.get_attribute("capacity_in_mw") * -1 + ) + assert max(consumption_schedule.values) <= charging_station.get_attribute( + "capacity_in_mw" + ) + print(consumption_schedule.head(12)) + print(soc_schedule.head(12)) + assert ( + abs(abs(soc_schedule.loc[target_soc_datetime] - target_soc) - expected_gap) + < 0.00001 + ) diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index c35aa8a86..333d55cb3 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -111,3 +111,64 @@ def get_prices( "Prices partially unknown for planning window." ) return price_df, query_window + + +def fallback_charging_policy( + sensor: Sensor, + device_constraints: pd.DataFrame, + start: datetime, + end: datetime, + resolution: timedelta, +) -> pd.Series: + """This fallback charging policy is to just start charging or discharging, or do neither, + depending on the first target state of charge and the capabilities of the Charge Point. + Note that this ignores any cause of the infeasibility and, + while probably a decent policy for Charge Points, + should not be considered a robust policy for other asset types. 
+ """ + charge_power = ( + sensor.get_attribute("capacity_in_mw") + if sensor.get_attribute("is_consumer") + else 0 + ) + discharge_power = ( + -sensor.get_attribute("capacity_in_mw") + if sensor.get_attribute("is_producer") + else 0 + ) + + charge_schedule = initialize_series(charge_power, start, end, resolution) + discharge_schedule = initialize_series(discharge_power, start, end, resolution) + idle_schedule = initialize_series(0, start, end, resolution) + if ( + device_constraints["max"].first_valid_index() is not None + and device_constraints["max"][device_constraints["max"].first_valid_index()] < 0 + ): + # start discharging to try and bring back the soc below the next max constraint + return discharge_schedule + if ( + device_constraints["min"].first_valid_index() is not None + and device_constraints["min"][device_constraints["min"].first_valid_index()] > 0 + ): + # start charging to try and bring back the soc above the next min constraint + return charge_schedule + if ( + device_constraints["equals"].first_valid_index() is not None + and device_constraints["equals"][ + device_constraints["equals"].first_valid_index() + ] + > 0 + ): + # start charging to get as close as possible to the next target + return charge_schedule + if ( + device_constraints["equals"].first_valid_index() is not None + and device_constraints["equals"][ + device_constraints["equals"].first_valid_index() + ] + < 0 + ): + # start discharging to get as close as possible to the next target + return discharge_schedule + # stand idle + return idle_schedule diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index fad75a2ba..126307090 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -85,14 +85,30 @@ def location(self) -> Optional[Tuple[float, float]]: return self.latitude, self.longitude return None + @property + def is_strictly_non_positive(self) -> bool: + """Return True if this sensor strictly records non-positive values.""" + return self.get_attribute("is_consumer", False) and not self.get_attribute( + "is_producer", True + ) + + @property + def is_strictly_non_negative(self) -> bool: + """Return True if this sensor strictly records non-negative values.""" + return self.get_attribute("is_producer", False) and not self.get_attribute( + "is_consumer", True + ) + def get_attribute(self, attribute: str, default: Any = None) -> Any: """Looks for the attribute on the Sensor. If not found, looks for the attribute on the Sensor's GenericAsset. If not found, returns the default. """ + if hasattr(self, attribute): + return getattr(self, attribute) if attribute in self.attributes: return self.attributes[attribute] - elif attribute in self.generic_asset.attributes: + if attribute in self.generic_asset.attributes: return self.generic_asset.attributes[attribute] return default From 98172dfa83be849157383c72e8d99f31424a4a23 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Thu, 9 Dec 2021 10:43:25 +0100 Subject: [PATCH 10/46] Issue 265 copy over weather correlations from the asset model to the generic asset model (#266) Revise a previous db migration (6cf5b241b85f), but still under the scope of project 9. GenericAssets that correspond to an old Asset should now have a new attribute "correlations", listing names of generic asset types. * Migrate weather correlations to GenericAsset attributes Signed-off-by: F.N. Claessen * Clean up todos Signed-off-by: F.N. 
Claessen --- ...es_from_old_data_models_to_GenericAsset.py | 97 +++++++++++++++++-- 1 file changed, 89 insertions(+), 8 deletions(-) diff --git a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py index b849d2b12..1d6966c87 100644 --- a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py +++ b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py @@ -37,14 +37,19 @@ def upgrade(): "sensor", sa.Column("attributes", sa.JSON(), nullable=True, default={}), ) - # todo: find places where we look for seasonality and get it from the corresponding GenericAsset instead - # todo: find places where we look for old_model_type and get it from the corresponding GenericAsset instead # Declare ORM table views + t_generic_asset_type = sa.Table( + "generic_asset_type", + sa.MetaData(), + sa.Column("id"), + sa.Column("name"), + ) t_generic_asset = sa.Table( "generic_asset", sa.MetaData(), sa.Column("id"), + sa.Column("generic_asset_type_id"), sa.Column("attributes"), ) t_sensor = sa.Table( @@ -149,6 +154,8 @@ def upgrade(): connection, t_market, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_sensor, t_old_model_type=t_market_type, old_model_attributes=["id", "market_type_name", "display_name"], @@ -162,6 +169,8 @@ def upgrade(): connection, t_market, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_generic_asset, t_old_model_type=t_market_type, old_model_attributes=["id", "market_type_name", "display_name"], @@ -170,6 +179,8 @@ def upgrade(): connection, t_weather_sensor, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_sensor, t_old_model_type=t_weather_sensor_type, old_model_attributes=["id", "weather_sensor_type_name", "display_name"], @@ -183,6 +194,8 @@ def upgrade(): connection, t_weather_sensor, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_generic_asset, t_old_model_type=t_weather_sensor_type, old_model_attributes=["id", "weather_sensor_type_name", "display_name"], @@ -191,6 +204,8 @@ def upgrade(): connection, t_asset, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_sensor, t_old_model_type=t_asset_type, old_model_attributes=[ @@ -212,6 +227,8 @@ def upgrade(): connection, t_asset, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target=t_generic_asset, t_old_model_type=t_asset_type, old_model_attributes=[ @@ -228,6 +245,26 @@ def upgrade(): "can_curtail", "can_shift", ], + extra_attributes_depending_on_old_model_type_name={ + "solar": { + "correlations": ["radiation"], + }, + "wind": { + "correlations": ["wind_speed"], + }, + "one-way_evse": { + "correlations": ["temperature"], + }, + "two-way_evse": { + "correlations": ["temperature"], + }, + "battery": { + "correlations": ["temperature"], + }, + "building": { + "correlations": ["temperature"], + }, + }, # The GenericAssetType table had these hardcoded weather correlations ) op.alter_column( "sensor", @@ -285,24 +322,28 @@ def copy_attributes( connection, t_old_model, t_sensor, + t_generic_asset_type, + t_generic_asset, t_target, t_old_model_type, old_model_attributes, old_model_type_attributes=[], extra_attributes={}, + extra_attributes_depending_on_old_model_type_name={}, ): """ :param old_model_attributes: first two attributes should be id and old_model_type_name, then any other columns we 
want to copy over from the old model :param old_model_type_attributes: columns we want to copy over from the old model type :param extra_attributes: any additional attributes we want to set + :param extra_attributes_depending_on_old_model_type_name: any additional attributes we want to set, depending on old model type name """ # Get attributes from old model results = connection.execute( sa.select([getattr(t_old_model.c, a) for a in old_model_attributes]) ).fetchall() - for id, type_name, *args in results: + for _id, type_name, *args in results: # Obtain attributes we want to copy over, from the old model old_model_attributes_to_copy = { @@ -318,18 +359,52 @@ def copy_attributes( old_model_type_attributes=old_model_type_attributes, ) - # Find out where to copy over the attributes + # Find out where to copy over the attributes and where the old sensor type lives if t_target.name == "generic_asset": - target_id = get_generic_asset_id(connection, id, t_sensor) + target_id = get_generic_asset_id(connection, _id, t_sensor) elif t_target.name == "sensor": - target_id = id + target_id = _id else: raise ValueError - # Fill in the target class's attributes - connection.execute( + # Fill in the target class's attributes: A) first those with extra attributes depending on model type name + generic_asset_type_names_with_extra_attributes = ( + extra_attributes_depending_on_old_model_type_name.keys() + ) + if t_target.name == "generic_asset": + for gatn in generic_asset_type_names_with_extra_attributes: + connection.execute( + t_target.update() + .where(t_target.c.id == target_id) + .where( + t_generic_asset_type.c.id + == t_generic_asset.c.generic_asset_type_id + ) + .where(t_generic_asset_type.c.name == gatn) + .values( + attributes=json.dumps( + { + **old_model_attributes_to_copy, + **old_model_type_attributes_to_copy, + **extra_attributes, + **extra_attributes_depending_on_old_model_type_name[ + gatn + ], + } + ) + ) + ) + + # Fill in the target class's attributes: B) then those without extra attributes depending on model type name + query = ( t_target.update() .where(t_target.c.id == target_id) + .where(t_generic_asset_type.c.id == t_generic_asset.c.generic_asset_type_id) + .where( + t_generic_asset_type.c.name.not_in( + generic_asset_type_names_with_extra_attributes + ) + ) .values( attributes=json.dumps( { @@ -340,6 +415,12 @@ def copy_attributes( ) ) ) + if t_target.name == "generic_asset": + connection.execute(query) + elif t_target.name == "sensor": + connection.execute( + query.where(t_sensor.c.generic_asset_id == t_generic_asset.c.id) + ) def get_generic_asset_id(connection, old_model_id: int, t_sensors) -> int: From 38c3726d5dd6b84a5e938988b2b5bf8249b834cb Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 10 Dec 2021 16:07:46 +0100 Subject: [PATCH 11/46] Ensure backwards compatibility of Power init and Market init (#269) In case old plugins are still initializing Power and Price objects, ensure backwards compatibility and warn for deprecation. NB Weather objects were already backwards compatible, because we already used sensor_id to mean weather_sensor_id for the old model. * Ensure backwards compatibility of Power init and Market init Signed-off-by: F.N. Claessen * Use tb utils to also warn about deprecation Signed-off-by: F.N. Claessen * Pop old argument from kwargs Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/assets.py | 10 ++++++++++ flexmeasures/data/models/markets.py | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 4584a93c3..af13d9a23 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -3,6 +3,7 @@ import isodate import timely_beliefs as tb +import timely_beliefs.utils as tb_utils from sqlalchemy.orm import Query from flexmeasures.data.config import db @@ -347,6 +348,15 @@ def to_dict(self): } def __init__(self, **kwargs): + # todo: deprecate the 'asset_id' argument in favor of 'sensor_id' (announced v0.8.0) + if "asset_id" in kwargs and "sensor_id" not in kwargs: + kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( + "sensor_id", + kwargs["sensor_id"], + "asset_id", + kwargs["asset_id"], + ) + kwargs.pop("asset_id", None) super(Power, self).__init__(**kwargs) def __repr__(self): diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index b2127eb5b..3bbba35ea 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -2,6 +2,7 @@ import timely_beliefs as tb from timely_beliefs.sensors.func_store import knowledge_horizons +import timely_beliefs.utils as tb_utils from sqlalchemy.orm import Query from flexmeasures.data.config import db @@ -203,4 +204,13 @@ def make_query(cls, **kwargs) -> Query: return super().make_query(**kwargs) def __init__(self, **kwargs): + # todo: deprecate the 'market_id' argument in favor of 'sensor_id' (announced v0.8.0) + if "market_id" in kwargs and "sensor_id" not in kwargs: + kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( + "sensor_id", + kwargs["sensor_id"], + "market_id", + kwargs["market_id"], + ) + kwargs.pop("market_id", None) super(Price, self).__init__(**kwargs) From 50e1c2dbd7ebcbe40ddbbc6f0db6d4d8f133c518 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 10 Dec 2021 16:18:54 +0100 Subject: [PATCH 12/46] Switch order of charging fallback policy to prioritize approaching SOC targets above staying within SOC constraints (#270) Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/planning/utils.py | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index 333d55cb3..1d57ba599 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -140,18 +140,6 @@ def fallback_charging_policy( charge_schedule = initialize_series(charge_power, start, end, resolution) discharge_schedule = initialize_series(discharge_power, start, end, resolution) idle_schedule = initialize_series(0, start, end, resolution) - if ( - device_constraints["max"].first_valid_index() is not None - and device_constraints["max"][device_constraints["max"].first_valid_index()] < 0 - ): - # start discharging to try and bring back the soc below the next max constraint - return discharge_schedule - if ( - device_constraints["min"].first_valid_index() is not None - and device_constraints["min"][device_constraints["min"].first_valid_index()] > 0 - ): - # start charging to try and bring back the soc above the next min constraint - return charge_schedule if ( device_constraints["equals"].first_valid_index() is not None and device_constraints["equals"][ @@ -170,5 +158,17 @@ def fallback_charging_policy( ): # start discharging to get as close as possible to the next target return discharge_schedule + if ( + device_constraints["max"].first_valid_index() is not None + and device_constraints["max"][device_constraints["max"].first_valid_index()] < 0 + ): + # start discharging to try and bring back the soc below the next max constraint + return discharge_schedule + if ( + device_constraints["min"].first_valid_index() is not None + and device_constraints["min"][device_constraints["min"].first_valid_index()] > 0 + ): + # start charging to try and bring back the soc above the next min constraint + return charge_schedule # stand idle return idle_schedule From ae0a61ee9cc376c0ce151f1b25958e65b89ee970 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Fri, 10 Dec 2021 16:44:41 +0100 Subject: [PATCH 13/46] Fix PR #269 and test the fix (#271) Signed-off-by: F.N. 
Claessen --- flexmeasures/conftest.py | 4 ++-- flexmeasures/data/models/assets.py | 4 ++-- flexmeasures/data/models/markets.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 240fe4073..4932ebbc4 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -336,7 +336,7 @@ def setup_assets( horizon=parse_duration("PT0M"), value=val, data_source_id=setup_sources["Seita"].id, - sensor_id=asset.id, + asset_id=asset.id, ) db.session.add(p) return {asset.name: asset for asset in assets} @@ -399,7 +399,7 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources horizon=timedelta(hours=0), value=val, data_source_id=setup_sources["Seita"].id, - sensor_id=setup_markets["epex_da"].id, + market_id=setup_markets["epex_da"].id, ) db.session.add(p) diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index af13d9a23..6ce3431c3 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -351,10 +351,10 @@ def __init__(self, **kwargs): # todo: deprecate the 'asset_id' argument in favor of 'sensor_id' (announced v0.8.0) if "asset_id" in kwargs and "sensor_id" not in kwargs: kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( - "sensor_id", - kwargs["sensor_id"], "asset_id", kwargs["asset_id"], + "sensor_id", + None, ) kwargs.pop("asset_id", None) super(Power, self).__init__(**kwargs) diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index 3bbba35ea..83c1ac38f 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -207,10 +207,10 @@ def __init__(self, **kwargs): # todo: deprecate the 'market_id' argument in favor of 'sensor_id' (announced v0.8.0) if "market_id" in kwargs and "sensor_id" not in kwargs: kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( - "sensor_id", - kwargs["sensor_id"], "market_id", kwargs["market_id"], + "sensor_id", + None, ) kwargs.pop("market_id", None) super(Price, self).__init__(**kwargs) From 41cfef202f90a1a63367ce3610313cc9c96959e4 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 20 Dec 2021 10:27:30 +0100 Subject: [PATCH 14/46] Issue 272 move over functionality to find closest sensor (#274) Also refactor and expand documentation. * Copy great circle distance methods from WeatherSensor to GenericAsset Signed-off-by: F.N. Claessen * Test finding the closest sensor Signed-off-by: F.N. Claessen * Return Sensor instead of WeatherSensor Signed-off-by: F.N. Claessen * Look for Sensor instead of WeatherSensor Signed-off-by: F.N. Claessen * Refactor old query Signed-off-by: F.N. Claessen * Return list of Sensors instead of list of WeatherSensors Signed-off-by: F.N. Claessen * Refactor: rename function and argument Signed-off-by: F.N. Claessen * Move tested function Signed-off-by: F.N. Claessen * Fix type annotation Signed-off-by: F.N. Claessen * Refactor: move query function to queries subpackage Signed-off-by: F.N. Claessen * Add docstring Signed-off-by: F.N. Claessen * Add farther temperature sensors to test Signed-off-by: F.N. Claessen * Move geo math to geo_utils Signed-off-by: F.N. Claessen * Add note regarding dependency on Postgres extensions Signed-off-by: F.N. Claessen * Typo Signed-off-by: F.N. 
Claessen --- documentation/dev/data.rst | 2 +- .../models/forecasting/model_spec_factory.py | 4 +- flexmeasures/data/models/generic_assets.py | 46 +++++++++++++++++++ flexmeasures/data/models/weather.py | 41 ++++------------- flexmeasures/data/queries/analytics.py | 10 ++-- flexmeasures/data/queries/sensors.py | 16 +++++++ flexmeasures/data/scripts/grid_weather.py | 4 +- flexmeasures/data/services/resources.py | 25 ++++++---- flexmeasures/data/tests/conftest.py | 27 +++++++++++ .../data/tests/test_sensor_queries.py | 28 +++++++++++ flexmeasures/ui/views/analytics.py | 16 +++---- flexmeasures/utils/geo_utils.py | 33 +++++++++++++ 12 files changed, 189 insertions(+), 63 deletions(-) create mode 100644 flexmeasures/data/tests/test_sensor_queries.py diff --git a/documentation/dev/data.rst b/documentation/dev/data.rst index dc846b1e8..9d13fb6b0 100644 --- a/documentation/dev/data.rst +++ b/documentation/dev/data.rst @@ -118,7 +118,7 @@ Finally, test if you can log in as the flexmeasures user: Add Postgres Extensions to your database(s) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To find the nearest sensors, FlexMeasures needs some extra POstgres support. +To find the nearest sensors, FlexMeasures needs some extra Postgres support. Add the following extensions while logged in as the postgres superuser: .. code-block:: bash diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 7fceb9463..8219477a9 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -28,7 +28,7 @@ set_training_and_testing_dates, get_query_window, ) -from flexmeasures.data.services.resources import find_closest_weather_sensor +from flexmeasures.data.services.resources import find_closest_sensor """ Here we generate an initial version of timetomodel specs, given what asset and what timing @@ -274,7 +274,7 @@ def configure_regressors_for_nearest_weather_sensor( for sensor_type in sensor_types: # Find nearest weather sensor - closest_sensor = find_closest_weather_sensor(sensor_type, object=sensor) + closest_sensor = find_closest_sensor(sensor_type, object=sensor) if closest_sensor is None: current_app.logger.warning( "No sensor found of sensor type %s to use as regressor for %s." diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index 4c5b57516..a36c122a4 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -1,8 +1,12 @@ from typing import Optional, Tuple +from sqlalchemy.ext.hybrid import hybrid_method +from sqlalchemy.sql.expression import func + from sqlalchemy.ext.mutable import MutableDict from flexmeasures.data import db +from flexmeasures.utils import geo_utils class GenericAssetType(db.Model): @@ -59,6 +63,48 @@ def location(self) -> Optional[Tuple[float, float]]: return self.latitude, self.longitude return None + @hybrid_method + def great_circle_distance(self, **kwargs): + """Query great circle distance (in km). 
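+
+ (A note for readers: the Python-side computation delegates to
+ geo_utils.earth_distance, which applies the spherical law of cosines
+ with an Earth radius of 6371 km.)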
+ + Can be called with an object that has latitude and longitude properties, for example: + + great_circle_distance(object=asset) + + Can also be called with latitude and longitude parameters, for example: + + great_circle_distance(latitude=32, longitude=54) + great_circle_distance(lat=32, lng=54) + + """ + other_location = geo_utils.parse_lat_lng(kwargs) + if None in other_location: + return None + return geo_utils.earth_distance(self.location, other_location) + + @great_circle_distance.expression + def great_circle_distance(self, **kwargs): + """Query great circle distance (unclear if in km or in miles). + + Can be called with an object that has latitude and longitude properties, for example: + + great_circle_distance(object=asset) + + Can also be called with latitude and longitude parameters, for example: + + great_circle_distance(latitude=32, longitude=54) + great_circle_distance(lat=32, lng=54) + + Requires the following Postgres extensions: earthdistance and cube. + """ + other_location = geo_utils.parse_lat_lng(kwargs) + if None in other_location: + return None + return func.earth_distance( + func.ll_to_earth(self.latitude, self.longitude), + func.ll_to_earth(*other_location), + ) + def get_attribute(self, attribute: str): if attribute in self.attributes: return self.attributes[attribute] diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index 5a5f4771a..d86fe2421 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -1,9 +1,8 @@ from typing import Dict, Tuple -import math import timely_beliefs as tb from sqlalchemy.orm import Query -from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property +from sqlalchemy.ext.hybrid import hybrid_method from sqlalchemy.sql.expression import func from sqlalchemy.schema import UniqueConstraint @@ -18,7 +17,7 @@ GenericAsset, GenericAssetType, ) -from flexmeasures.utils.geo_utils import parse_lat_lng +from flexmeasures.utils import geo_utils from flexmeasures.utils.entity_address_utils import build_entity_address from flexmeasures.utils.flexmeasures_inflection import humanize @@ -186,18 +185,6 @@ def weather_unit(self) -> float: def location(self) -> Tuple[float, float]: return self.latitude, self.longitude - @hybrid_property - def cos_rad_lat(self): - return math.cos(math.radians(self.latitude)) - - @hybrid_property - def sin_rad_lat(self): - return math.sin(math.radians(self.latitude)) - - @hybrid_property - def rad_lng(self): - return math.radians(self.longitude) - @hybrid_method def great_circle_distance(self, **kwargs): """Query great circle distance (in km). 
@@ -212,22 +199,10 @@ def great_circle_distance(self, **kwargs): great_circle_distance(lat=32, lng=54) """ - r = 6371 # Radius of Earth in kilometres - other_latitude, other_longitude = parse_lat_lng(kwargs) - if other_latitude is None or other_longitude is None: + other_location = geo_utils.parse_lat_lng(kwargs) + if None in other_location: return None - other_cos_rad_lat = math.cos(math.radians(other_latitude)) - other_sin_rad_lat = math.sin(math.radians(other_latitude)) - other_rad_lng = math.radians(other_longitude) - return ( - math.acos( - self.cos_rad_lat - * other_cos_rad_lat - * math.cos(self.rad_lng - other_rad_lng) - + self.sin_rad_lat * other_sin_rad_lat - ) - * r - ) + return geo_utils.earth_distance(self.location, other_location) @great_circle_distance.expression def great_circle_distance(self, **kwargs): @@ -243,12 +218,12 @@ def great_circle_distance(self, **kwargs): great_circle_distance(lat=32, lng=54) """ - other_latitude, other_longitude = parse_lat_lng(kwargs) - if other_latitude is None or other_longitude is None: + other_location = geo_utils.parse_lat_lng(kwargs) + if None in other_location: return None return func.earth_distance( func.ll_to_earth(self.latitude, self.longitude), - func.ll_to_earth(other_latitude, other_longitude), + func.ll_to_earth(*other_location), ) sensor_type = db.relationship( diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index 08b364456..f61dbfd9c 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -11,11 +11,11 @@ ) from flexmeasures.data.services.time_series import set_bdf_source from flexmeasures.utils import calculations, time_utils -from flexmeasures.data.services.resources import Resource, find_closest_weather_sensor +from flexmeasures.data.services.resources import Resource, find_closest_sensor from flexmeasures.data.models.assets import Asset, Power from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensor, WeatherSensorType +from flexmeasures.data.models.weather import Weather, WeatherSensorType def get_power_data( @@ -234,7 +234,7 @@ def get_weather_data( query_window: Tuple[datetime, datetime], resolution: str, forecast_horizon: timedelta, -) -> Tuple[pd.DataFrame, pd.DataFrame, str, WeatherSensor, dict]: +) -> Tuple[pd.DataFrame, pd.DataFrame, str, Sensor, dict]: """Get most recent weather data and forecast weather data for the requested forecast horizon. 
Return weather observations, weather forecasts (either might be an empty DataFrame), @@ -254,9 +254,7 @@ def get_weather_data( if sensor_type: # Find the 50 closest weather sensors sensor_type_name = sensor_type.name - closest_sensors = find_closest_weather_sensor( - sensor_type_name, n=50, object=asset - ) + closest_sensors = find_closest_sensor(sensor_type_name, n=50, object=asset) if closest_sensors: closest_sensor = closest_sensors[0] diff --git a/flexmeasures/data/queries/sensors.py b/flexmeasures/data/queries/sensors.py index 8fec46598..dee109a74 100644 --- a/flexmeasures/data/queries/sensors.py +++ b/flexmeasures/data/queries/sensors.py @@ -27,3 +27,19 @@ def query_sensor_by_name_and_generic_asset_type_name( .filter(Sensor.generic_asset_id == GenericAsset.id) ) return query + + +def query_sensors_by_proximity( + generic_asset_type_name: str, latitude: float, longitude: float +) -> Query: + """Match sensors by the name of their generic asset type, and order them by proximity.""" + closest_sensor_query = ( + Sensor.query.join(GenericAsset, GenericAssetType) + .filter( + Sensor.generic_asset_id == GenericAsset.id, + GenericAsset.generic_asset_type_id == GenericAssetType.id, + GenericAssetType.name == generic_asset_type_name, + ) + .order_by(GenericAsset.great_circle_distance(lat=latitude, lng=longitude).asc()) + ) + return closest_sensor_query diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index 99b21924c..fa7e1484d 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -12,7 +12,7 @@ from flexmeasures.utils.time_utils import as_server_time, get_timezone from flexmeasures.utils.geo_utils import compute_irradiance -from flexmeasures.data.services.resources import find_closest_weather_sensor +from flexmeasures.data.services.resources import find_closest_sensor from flexmeasures.data.config import db from flexmeasures.data.transactional import task_with_status_report from flexmeasures.data.models.weather import Weather @@ -382,7 +382,7 @@ def save_forecasts_in_db( if needed_response_label in fc: weather_sensor = weather_sensors.get(flexmeasures_sensor_type, None) if weather_sensor is None: - weather_sensor = find_closest_weather_sensor( + weather_sensor = find_closest_sensor( flexmeasures_sensor_type, lat=location[0], lng=location[1] ) if weather_sensor is not None: diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index bc34a8db9..4f68f8593 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -6,6 +6,8 @@ from functools import cached_property, wraps from typing import List, Dict, Tuple, Type, TypeVar, Union, Optional from datetime import datetime + +from flexmeasures.data.queries.sensors import query_sensors_by_proximity from flexmeasures.utils.flexmeasures_inflection import parameterize, pluralize from itertools import groupby @@ -25,7 +27,7 @@ ) from flexmeasures.data.models.markets import Market, Price from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensor, WeatherSensorType +from flexmeasures.data.models.weather import Weather, WeatherSensorType from flexmeasures.data.models.user import User from flexmeasures.data.queries.utils import simplify_index from flexmeasures.data.services.time_series import aggregate_values @@ -653,10 +655,10 @@ def get_sensor_types(resource: Resource) -> List[WeatherSensorType]: return 
sensor_types -def find_closest_weather_sensor( - sensor_type: str, n: int = 1, **kwargs -) -> Union[WeatherSensor, List[WeatherSensor], None]: - """Returns the closest n weather sensors of a given type (as a list if n > 1). +def find_closest_sensor( + generic_asset_type_name: str, n: int = 1, **kwargs +) -> Union[Sensor, List[Sensor], None]: + """Returns the closest n sensors of a given type (as a list if n > 1). Parses latitude and longitude values stated in kwargs. Can be called with an object that has latitude and longitude properties, for example: @@ -671,13 +673,16 @@ def find_closest_weather_sensor( """ latitude, longitude = parse_lat_lng(kwargs) - sensors = WeatherSensor.query.filter( - WeatherSensor.weather_sensor_type_name == sensor_type - ).order_by(WeatherSensor.great_circle_distance(lat=latitude, lng=longitude).asc()) if n == 1: - return sensors.first() + return query_sensors_by_proximity( + generic_asset_type_name, latitude, longitude + ).first() else: - return sensors.limit(n).all() + return ( + query_sensors_by_proximity(generic_asset_type_name, latitude, longitude) + .limit(n) + .all() + ) def group_assets_by_location(asset_list: List[Asset]) -> List[List[Asset]]: diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index 1c2e882b4..87191dfe8 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -1,6 +1,7 @@ import pytest from datetime import datetime, timedelta from random import random +from typing import Dict from isodate import parse_duration import pandas as pd @@ -155,3 +156,29 @@ def test_specs(**args): return model_specs, model_identifier, "linear-OLS" model_map["failing-test"] = test_specs + + +@pytest.fixture(scope="module") +def add_nearby_weather_sensors(db, add_weather_sensors) -> Dict[str, WeatherSensor]: + temp_sensor_location = add_weather_sensors["temperature"].location + farther_temp_sensor = WeatherSensor( + name="farther_temperature_sensor", + weather_sensor_type_name="temperature", + event_resolution=timedelta(minutes=5), + latitude=temp_sensor_location[0], + longitude=temp_sensor_location[1] + 0.1, + unit="°C", + ) + even_farther_temp_sensor = WeatherSensor( + name="even_farther_temperature_sensor", + weather_sensor_type_name="temperature", + event_resolution=timedelta(minutes=5), + latitude=temp_sensor_location[0], + longitude=temp_sensor_location[1] + 0.2, + unit="°C", + ) + db.session.add(farther_temp_sensor) + db.session.add(even_farther_temp_sensor) + add_weather_sensors["farther_temperature"] = farther_temp_sensor + add_weather_sensors["even_farther_temperature"] = even_farther_temp_sensor + return add_weather_sensors diff --git a/flexmeasures/data/tests/test_sensor_queries.py b/flexmeasures/data/tests/test_sensor_queries.py new file mode 100644 index 000000000..78c434651 --- /dev/null +++ b/flexmeasures/data/tests/test_sensor_queries.py @@ -0,0 +1,28 @@ +from flexmeasures.data.services.resources import find_closest_sensor + + +def test_closest_sensor(add_nearby_weather_sensors): + """Check that the closest temperature sensor to our wind sensor returns + the one that is on the same spot as the wind sensor itself. + (That's where we set it up in our conftest.) + And check that the 2nd and 3rd closest are the farther temperature sensors we set up. 
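+
+ Note: the ordering happens in the database, via the great_circle_distance
+ expression (ll_to_earth/earth_distance), so this test relies on the cube
+ and earthdistance Postgres extensions mentioned in documentation/dev/data.rst.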
+ """ + wind_sensor = add_nearby_weather_sensors["wind"] + generic_asset_type_name = "temperature" + closest_sensors = find_closest_sensor( + generic_asset_type_name, + n=3, + latitude=wind_sensor.latitude, + longitude=wind_sensor.longitude, + ) + assert closest_sensors[0].location == wind_sensor.location + assert ( + closest_sensors[1] + == add_nearby_weather_sensors["farther_temperature"].corresponding_sensor + ) + assert ( + closest_sensors[2] + == add_nearby_weather_sensors["even_farther_temperature"].corresponding_sensor + ) + for sensor in closest_sensors: + assert sensor.generic_asset.generic_asset_type.name == generic_asset_type_name diff --git a/flexmeasures/ui/views/analytics.py b/flexmeasures/ui/views/analytics.py index 73e908c74..881555021 100644 --- a/flexmeasures/ui/views/analytics.py +++ b/flexmeasures/ui/views/analytics.py @@ -16,7 +16,6 @@ from flexmeasures.auth.decorators import account_roles_accepted from flexmeasures.data.models.markets import Market from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.services.resources import ( get_assets, get_asset_group_queries, @@ -31,7 +30,6 @@ get_revenues_costs_data, ) from flexmeasures.utils import time_utils -from flexmeasures.utils.flexmeasures_inflection import humanize from flexmeasures.ui.utils.view_utils import ( render_flexmeasures_template, set_session_resource, @@ -390,7 +388,7 @@ def get_data_and_metrics( selected_market_sensor: Sensor, selected_sensor_type, assets, -) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float], str, WeatherSensor]: +) -> Tuple[Dict[str, pd.DataFrame], Dict[str, float], str, Sensor]: """Getting data and calculating metrics for them""" data: Dict[str, pd.DataFrame] = dict() forecast_horizon = pd.to_timedelta(session["forecast_horizon"]) @@ -490,7 +488,7 @@ def get_data_and_metrics( def filter_for_past_data(data): - """ Make sure we only show past data, useful for demo mode """ + """Make sure we only show past data, useful for demo mode""" most_recent_quarter = time_utils.get_most_recent_quarter() if not data["power"].empty: @@ -513,7 +511,7 @@ def filter_for_past_data(data): def filter_forecasts_for_limited_time_window(data): - """ Show forecasts only up to a limited horizon """ + """Show forecasts only up to a limited horizon""" most_recent_quarter = time_utils.get_most_recent_quarter() horizon_days = 10 # keep a 10 day forecast max_forecast_datetime = most_recent_quarter + timedelta(hours=horizon_days * 24) @@ -603,7 +601,7 @@ def make_weather_figure( data: pd.DataFrame, forecast_data: Union[None, pd.DataFrame], shared_x_range: Range1d, - weather_sensor: WeatherSensor, + weather_sensor: Sensor, tools: List[str] = None, sizing_mode="scale_width", ) -> Figure: @@ -616,17 +614,17 @@ def make_weather_figure( ) unit = weather_sensor.unit weather_axis_label = "%s (in %s)" % ( - humanize(weather_sensor.sensor_type.display_name), + weather_sensor.generic_asset.generic_asset_type.description, unit, ) if selected_resource.is_unique_asset: title = "%s at %s" % ( - humanize(weather_sensor.sensor_type.display_name), + weather_sensor.generic_asset.generic_asset_type.description, selected_resource.display_name, ) else: - title = "%s" % humanize(weather_sensor.sensor_type.display_name) + title = "%s" % weather_sensor.generic_asset.generic_asset_type.description return create_graph( data, unit=unit, diff --git a/flexmeasures/utils/geo_utils.py b/flexmeasures/utils/geo_utils.py index aff5088d1..783566eb9 100644 --- 
a/flexmeasures/utils/geo_utils.py +++ b/flexmeasures/utils/geo_utils.py @@ -1,10 +1,43 @@ from typing import Tuple, Union from datetime import datetime +import math from pvlib.location import Location import pandas as pd +def cos_rad_lat(latitude: float) -> float: + return math.cos(math.radians(latitude)) + + +def sin_rad_lat(latitude: float) -> float: + return math.sin(math.radians(latitude)) + + +def rad_lng(longitude: float) -> float: + return math.radians(longitude) + + +def earth_distance( + location: Tuple[float, float], other_location: Tuple[float, float] +) -> float: + """Great circle distance in km between two locations on Earth.""" + r = 6371 # Radius of Earth in kilometres + _cos_rad_lat = cos_rad_lat(location[0]) + _sin_rad_lat = sin_rad_lat(location[0]) + _rad_lng = rad_lng(location[1]) + other_cos_rad_lat = cos_rad_lat(other_location[0]) + other_sin_rad_lat = sin_rad_lat(other_location[0]) + other_rad_lng = rad_lng(other_location[1]) + return ( + math.acos( + _cos_rad_lat * other_cos_rad_lat * math.cos(_rad_lng - other_rad_lng) + + _sin_rad_lat * other_sin_rad_lat + ) + * r + ) + + def parse_lat_lng(kwargs) -> Union[Tuple[float, float], Tuple[None, None]]: """Parses latitude and longitude values stated in kwargs. From f9dca07aa7b3e746fa11c398d3260bc8077ed363 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 20 Dec 2021 12:00:59 +0100 Subject: [PATCH 15/46] Copy Asset and WeatherSensor locations to GenericAsset (#276) Copy Asset and WeatherSensor locations to GenericAsset (Markets didn't have a location). Sensor attributes get a copy, too, because multiple Sensors under one GenericAsset may have slightly different locations, while the GenericAsset has only one location. * Copy Asset and WeatherSensor locations to GenericAsset (Sensor attributes get a copy, too, because multiple Sensors under one GenericAsset may have slightly different locations, while the GenericAsset has only one location) Signed-off-by: F.N. Claessen * Rename Signed-off-by: F.N. 
Claessen --- ...es_from_old_data_models_to_GenericAsset.py | 53 ++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py index 1d6966c87..71f158ce3 100644 --- a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py +++ b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py @@ -50,6 +50,8 @@ def upgrade(): sa.MetaData(), sa.Column("id"), sa.Column("generic_asset_type_id"), + sa.Column("latitude"), + sa.Column("longitude"), sa.Column("attributes"), ) t_sensor = sa.Table( @@ -90,6 +92,8 @@ def upgrade(): sa.Column("id"), sa.Column("asset_type_name"), sa.Column("display_name"), # Copy to both Sensor and to GenericAsset + sa.Column("latitude"), # Copy to GenericAsset + sa.Column("longitude"), # Copy to GenericAsset sa.Column("capacity_in_mw"), # Copy to Sensor sa.Column("min_soc_in_mwh"), # Copy to GenericAsset [1] sa.Column("max_soc_in_mwh"), # Copy to GenericAsset [1] @@ -124,6 +128,8 @@ def upgrade(): sa.Column("id"), sa.Column("weather_sensor_type_name"), sa.Column("display_name"), # Copy to both Sensor and to GenericAsset + sa.Column("latitude"), # Copy to GenericAsset + sa.Column("longitude"), # Copy to GenericAsset sa.Column("unit"), # Copy to Sensor [done] sa.Column("event_resolution"), # Copy to Sensor [done] sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done] @@ -183,7 +189,13 @@ def upgrade(): t_generic_asset, t_target=t_sensor, t_old_model_type=t_weather_sensor_type, - old_model_attributes=["id", "weather_sensor_type_name", "display_name"], + old_model_attributes=[ + "id", + "weather_sensor_type_name", + "display_name", + "latitude", + "longitude", + ], extra_attributes={ "daily_seasonality": True, "weekly_seasonality": False, @@ -212,6 +224,8 @@ def upgrade(): "id", "asset_type_name", "display_name", + "latitude", + "longitude", "capacity_in_mw", "market_id", ], @@ -279,6 +293,12 @@ def upgrade(): copy_sensor_columns(connection, t_market, t_sensor) copy_sensor_columns(connection, t_weather_sensor, t_sensor) copy_sensor_columns(connection, t_asset, t_sensor) + copy_location_columns_to_generic_asset( + connection, t_weather_sensor, t_generic_asset, t_sensor + ) + copy_location_columns_to_generic_asset( + connection, t_asset, t_generic_asset, t_sensor + ) def downgrade(): @@ -286,6 +306,37 @@ def downgrade(): op.drop_column("generic_asset", "attributes") +def copy_location_columns_to_generic_asset( + connection, t_old_model, t_generic_asset, t_sensor +): + old_model_attributes = [ + "id", + "latitude", + "longitude", + ] + # Get columns from old model + results = connection.execute( + sa.select([getattr(t_old_model.c, a) for a in old_model_attributes]) + ).fetchall() + + for sensor_id, *args in results: + # Obtain columns we want to copy over, from the old model + old_model_columns_to_copy = { + k: v if not isinstance(v, dict) else json.dumps(v) + for k, v in zip(old_model_attributes[-len(args) :], args) + } + + # Fill in the GenericAsset's columns + connection.execute( + t_generic_asset.update() + .where(t_generic_asset.c.id == t_sensor.c.generic_asset_id) + .where(t_sensor.c.id == sensor_id) + .values( + **old_model_columns_to_copy, + ) + ) + + def copy_sensor_columns(connection, t_old_model, t_sensor): old_model_attributes = [ "id", 
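The data migrations above, including copy_location_columns_to_generic_asset, all follow one select-then-update pattern: declare lightweight sa.Table stubs holding only the columns being touched, read rows from the old model, and write values onto the matching target row. A minimal sketch of that pattern (the table and column names below are hypothetical stand-ins, not part of the actual migration):

    import json

    import sqlalchemy as sa

    # Hypothetical table stubs, declared with only the columns we touch
    t_old = sa.Table(
        "old_model",
        sa.MetaData(),
        sa.Column("id"),
        sa.Column("latitude"),
        sa.Column("longitude"),
    )
    t_new = sa.Table(
        "new_model",
        sa.MetaData(),
        sa.Column("id"),
        sa.Column("attributes"),
    )

    def copy_location_to_attributes(connection):
        # Read all rows from the old model ...
        rows = connection.execute(
            sa.select([t_old.c.id, t_old.c.latitude, t_old.c.longitude])
        ).fetchall()
        # ... and store their location as JSON attributes on the new model
        for _id, latitude, longitude in rows:
            connection.execute(
                t_new.update()
                .where(t_new.c.id == _id)
                .values(
                    attributes=json.dumps(
                        {"latitude": latitude, "longitude": longitude}
                    )
                )
            )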
From e0e440d24c92db9f326e3185be1eae15ea645150 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 20 Dec 2021 12:08:58 +0100 Subject: [PATCH 16/46] Overwrite Sensor.name with the name of the old sensor. (#278) * Copy Asset and WeatherSensor locations to GenericAsset (Sensor attributes get a copy, too, because multiple Sensors under one GenericAsset may have slightly different locations, while the GenericAsset has only one location) Signed-off-by: F.N. Claessen * Overwrite Sensor.name with the name of the old sensor. Signed-off-by: F.N. Claessen --- ...f_copy_attributes_from_old_data_models_to_GenericAsset.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py index 71f158ce3..3612bd57a 100644 --- a/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py +++ b/flexmeasures/data/migrations/versions/6cf5b241b85f_copy_attributes_from_old_data_models_to_GenericAsset.py @@ -58,6 +58,7 @@ def upgrade(): "sensor", sa.MetaData(), sa.Column("id"), + sa.Column("name"), sa.Column("attributes"), sa.Column("generic_asset_id"), sa.Column("unit"), @@ -70,6 +71,7 @@ def upgrade(): sa.MetaData(), sa.Column("id", sa.Integer), sa.Column("market_type_name", sa.String(80)), + sa.Column("name"), # Copy to Sensor sa.Column( "display_name", sa.String(80) ), # Copy to both Sensor and to GenericAsset @@ -91,6 +93,7 @@ def upgrade(): sa.MetaData(), sa.Column("id"), sa.Column("asset_type_name"), + sa.Column("name"), # Copy to Sensor sa.Column("display_name"), # Copy to both Sensor and to GenericAsset sa.Column("latitude"), # Copy to GenericAsset sa.Column("longitude"), # Copy to GenericAsset @@ -127,6 +130,7 @@ def upgrade(): sa.MetaData(), sa.Column("id"), sa.Column("weather_sensor_type_name"), + sa.Column("name"), # Copy to Sensor sa.Column("display_name"), # Copy to both Sensor and to GenericAsset sa.Column("latitude"), # Copy to GenericAsset sa.Column("longitude"), # Copy to GenericAsset @@ -340,6 +344,7 @@ def copy_location_columns_to_generic_asset( def copy_sensor_columns(connection, t_old_model, t_sensor): old_model_attributes = [ "id", + "name", "unit", "event_resolution", "knowledge_horizon_fnc", From f126f882b515f9d4c1a126700dd04328ca6f29b4 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Tue, 21 Dec 2021 09:51:31 +0100 Subject: [PATCH 17/46] Let a Sensor define a custom location in its attributes column. If not specified, the Sensor location defaults to that of the GenericAsset it belongs to. Allow distinct sensor locations per generic asset (#281) * Add missing way to get an attribute from GenericAsset Signed-off-by: F.N. Claessen * Derive sensor location using get_attributes Signed-off-by: F.N. 
Claessen --- flexmeasures/api/common/utils/api_utils.py | 3 +-- flexmeasures/data/models/generic_assets.py | 6 +++--- flexmeasures/data/models/time_series.py | 16 +++++----------- flexmeasures/utils/geo_utils.py | 2 ++ 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 3b192cb58..89d7ac7ef 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -333,8 +333,7 @@ def get_sensor_by_generic_asset_type_and_location( ).first() if nearest_weather_sensor is not None: return unrecognized_sensor( - nearest_weather_sensor.latitude, - nearest_weather_sensor.longitude, + *nearest_weather_sensor.location, ) else: return unrecognized_sensor() diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index a36c122a4..343a29bdc 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -59,9 +59,9 @@ class GenericAsset(db.Model): @property def location(self) -> Optional[Tuple[float, float]]: - if None not in (self.latitude, self.longitude): - return self.latitude, self.longitude - return None + location = (self.latitude, self.longitude) + if None not in location: + return location @hybrid_method def great_circle_distance(self, **kwargs): diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 126307090..950a8ad67 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -71,19 +71,11 @@ def __init__( def entity_address(self) -> str: return build_entity_address(dict(sensor_id=self.id), "sensor") - @property - def latitude(self) -> float: - return self.generic_asset.latitude - - @property - def longitude(self) -> float: - return self.generic_asset.longitude - @property def location(self) -> Optional[Tuple[float, float]]: - if None not in (self.latitude, self.longitude): - return self.latitude, self.longitude - return None + location = (self.get_attribute("latitude"), self.get_attribute("longitude")) + if None not in location: + return location @property def is_strictly_non_positive(self) -> bool: @@ -108,6 +100,8 @@ def get_attribute(self, attribute: str, default: Any = None) -> Any: return getattr(self, attribute) if attribute in self.attributes: return self.attributes[attribute] + if hasattr(self.generic_asset, attribute): + return getattr(self.generic_asset, attribute) if attribute in self.generic_asset.attributes: return self.generic_asset.attributes[attribute] return default diff --git a/flexmeasures/utils/geo_utils.py b/flexmeasures/utils/geo_utils.py index 783566eb9..428194809 100644 --- a/flexmeasures/utils/geo_utils.py +++ b/flexmeasures/utils/geo_utils.py @@ -62,6 +62,8 @@ def parse_lat_lng(kwargs) -> Union[Tuple[float, float], Tuple[None, None]]: return obj.latitude, obj.longitude elif hasattr(obj, "lat") and hasattr(obj, "lng"): return obj.lat, obj.lng + elif hasattr(obj, "location"): + return obj.location return None, None From 0d26f7cfd0e42a2d5e197b2d7fcf02b2509e22da Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Tue, 21 Dec 2021 11:07:15 +0100 Subject: [PATCH 18/46] Stop planned (dis)charging after target (or constraint) is reached. (#280) * Stop planned (dis)charging after target is reached. Signed-off-by: F.N. Claessen * Stop planned (dis)charging after constraint is met. Signed-off-by: F.N. 
Claessen * Refactor Signed-off-by: F.N. Claessen * Be more explicit about initial state Signed-off-by: F.N. Claessen --- flexmeasures/data/models/planning/utils.py | 24 ++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index 1d57ba599..62186c747 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -148,7 +148,7 @@ def fallback_charging_policy( > 0 ): # start charging to get as close as possible to the next target - return charge_schedule + return idle_after_reaching_target(charge_schedule, device_constraints["equals"]) if ( device_constraints["equals"].first_valid_index() is not None and device_constraints["equals"][ @@ -157,18 +157,34 @@ def fallback_charging_policy( < 0 ): # start discharging to get as close as possible to the next target - return discharge_schedule + return idle_after_reaching_target( + discharge_schedule, device_constraints["equals"] + ) if ( device_constraints["max"].first_valid_index() is not None and device_constraints["max"][device_constraints["max"].first_valid_index()] < 0 ): # start discharging to try and bring back the soc below the next max constraint - return discharge_schedule + return idle_after_reaching_target(discharge_schedule, device_constraints["max"]) if ( device_constraints["min"].first_valid_index() is not None and device_constraints["min"][device_constraints["min"].first_valid_index()] > 0 ): # start charging to try and bring back the soc above the next min constraint - return charge_schedule + return idle_after_reaching_target(charge_schedule, device_constraints["min"]) # stand idle return idle_schedule + + +def idle_after_reaching_target( + schedule: pd.Series, + target: pd.Series, + initial_state: float = 0, +) -> pd.Series: + """Stop planned (dis)charging after target is reached (or constraint is met).""" + first_target = target[target.first_valid_index()] + if first_target > initial_state: + schedule[schedule.cumsum() > first_target] = 0 + else: + schedule[schedule.cumsum() < first_target] = 0 + return schedule From 754b46b97f4db24579baa3b8c058f96635bb20ce Mon Sep 17 00:00:00 2001 From: "create-issue-branch[bot]" <53036503+create-issue-branch[bot]@users.noreply.github.com> Date: Tue, 21 Dec 2021 17:59:04 +0100 Subject: [PATCH 19/46] UI: Dashboard using GenericAssets (#251) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create draft PR for #249 * working state of new dashboard; created new file asset_grouping with class AssetGroup, which is aimed to replace resources.Resource; created new view /sensor//state * smaller items from review and add a default ACL for generic assets * move querying assets by type to a data.queries module * make asset type grouping configurable & simplify initialising an AssetGroup * query asset for its state (via one of its power sensors) and also identify power sensors by unit * Dashboard only shows asset groups where at least one asset has a location and power sensors * add missing module * restore grouping by location behaviour for later * move intro text to modal dialogue * make is_power_unit only care about power * include energy assets on dashboard * fix docstring Co-authored-by: nhoening Co-authored-by: Nicolas Höning --- documentation/configuration.rst | 8 + .../api/common/schemas/generic_assets.py | 21 ++ flexmeasures/api/common/schemas/sensors.py | 16 ++ 
flexmeasures/data/models/generic_assets.py | 71 +++++- flexmeasures/data/models/time_series.py | 28 ++- flexmeasures/data/queries/generic_assets.py | 20 ++ flexmeasures/data/services/asset_grouping.py | 222 +++++++++++++++++ flexmeasures/data/services/resources.py | 5 + flexmeasures/ui/charts/latest_state.py | 115 +++++++++ flexmeasures/ui/templates/base.html | 69 ++++++ .../ui/templates/views/new_dashboard.html | 224 ++++++++++++++++++ flexmeasures/ui/views/__init__.py | 7 +- flexmeasures/ui/views/dashboard.py | 7 +- flexmeasures/ui/views/new_dashboard.py | 65 +++++ flexmeasures/ui/views/state.py | 65 ++++- flexmeasures/utils/config_defaults.py | 4 + flexmeasures/utils/error_utils.py | 2 +- flexmeasures/utils/unit_utils.py | 20 ++ requirements/app.txt | 11 +- 19 files changed, 967 insertions(+), 13 deletions(-) create mode 100644 flexmeasures/api/common/schemas/generic_assets.py create mode 100644 flexmeasures/data/queries/generic_assets.py create mode 100644 flexmeasures/data/services/asset_grouping.py create mode 100644 flexmeasures/ui/charts/latest_state.py create mode 100644 flexmeasures/ui/templates/views/new_dashboard.html create mode 100644 flexmeasures/ui/views/new_dashboard.py diff --git a/documentation/configuration.rst b/documentation/configuration.rst index 226c83e9d..b40e83a93 100644 --- a/documentation/configuration.rst +++ b/documentation/configuration.rst @@ -197,6 +197,14 @@ Interval in which viewing the queues dashboard refreshes itself, in milliseconds Default: ``3000`` (3 seconds) +FLEXMEASURES_ASSET_TYPE_GROUPS +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +How to group asset types together, e.g. in a dashboard. + +Default: ``{"renewables": ["solar", "wind"], "EVSE": ["one-way_evse", "two-way_evse"]}`` + + Timing ------ diff --git a/flexmeasures/api/common/schemas/generic_assets.py b/flexmeasures/api/common/schemas/generic_assets.py new file mode 100644 index 000000000..6bdab3249 --- /dev/null +++ b/flexmeasures/api/common/schemas/generic_assets.py @@ -0,0 +1,21 @@ +from flask import abort +from marshmallow import fields + +from flexmeasures.data.models.generic_assets import GenericAsset + + +class AssetIdField(fields.Integer): + """ + Field that represents a generic asset ID. It de-serializes from the asset id to an asset instance. + """ + + def _deserialize(self, asset_id: int, attr, obj, **kwargs) -> GenericAsset: + asset: GenericAsset = GenericAsset.query.filter_by( + id=int(asset_id) + ).one_or_none() + if asset is None: + raise abort(404, f"GenericAsset {asset_id} not found") + return asset + + def _serialize(self, asset: GenericAsset, attr, data, **kwargs) -> int: + return asset.id diff --git a/flexmeasures/api/common/schemas/sensors.py b/flexmeasures/api/common/schemas/sensors.py index 6e3d288a6..3b4daa65e 100644 --- a/flexmeasures/api/common/schemas/sensors.py +++ b/flexmeasures/api/common/schemas/sensors.py @@ -1,3 +1,4 @@ +from flask import abort from marshmallow import fields from flexmeasures.api import FMValidationError @@ -15,6 +16,21 @@ class EntityAddressValidationError(FMValidationError): status = "INVALID_DOMAIN" # USEF error status +class SensorIdField(fields.Integer): + """ + Field that represents a sensor ID. It de-serializes from the sensor id to a sensor instance. 
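+
+ Usage sketch (illustrative only; deserializing hits the database, so an
+ application context is needed):
+
+ sensor = SensorIdField().deserialize(5) # the Sensor with id 5, or a 404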
+ """ + + def _deserialize(self, sensor_id: int, attr, obj, **kwargs) -> Sensor: + sensor: Sensor = Sensor.query.filter_by(id=int(sensor_id)).one_or_none() + if sensor is None: + raise abort(404, f"Sensor {sensor_id} not found") + return sensor + + def _serialize(self, sensor: Sensor, attr, data, **kwargs) -> int: + return sensor.id + + class SensorField(fields.Str): """Field that de-serializes to a Sensor, and serializes a Sensor, Asset, Market or WeatherSensor into an entity address (string).""" diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index 343a29bdc..a14d2d690 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -1,4 +1,8 @@ -from typing import Optional, Tuple +from typing import Optional, Tuple, List + +from flask_security import current_user +from sqlalchemy.orm import Session +from sqlalchemy.engine import Row from sqlalchemy.ext.hybrid import hybrid_method from sqlalchemy.sql.expression import func @@ -6,6 +10,8 @@ from sqlalchemy.ext.mutable import MutableDict from flexmeasures.data import db +from flexmeasures.data.models.user import User +from flexmeasures.auth.policy import AuthModelMixin from flexmeasures.utils import geo_utils @@ -20,7 +26,7 @@ class GenericAssetType(db.Model): description = db.Column(db.String(80), nullable=True, unique=False) -class GenericAsset(db.Model): +class GenericAsset(db.Model, AuthModelMixin): """An asset is something that has economic value. Examples of tangible assets: a house, a ship, a weather station. @@ -42,6 +48,23 @@ class GenericAsset(db.Model): backref=db.backref("generic_assets", lazy=True), ) + def __acl__(self): + """ + Within same account, everyone can read and update. + Creation and deletion are left to site admins in CLI. + + TODO: needs an iteration + """ + return { + "read": f"account:{self.account_id}", + "update": f"account:{self.account_id}", + } + + @property + def asset_type(self) -> GenericAssetType: + """ This property prepares for dropping the "generic" prefix later""" + return self.generic_asset_type + account_id = db.Column( db.Integer, db.ForeignKey("account.id", ondelete="CASCADE"), nullable=True ) # if null, asset is public @@ -116,6 +139,16 @@ def set_attribute(self, attribute: str, value): if self.has_attribute(attribute): self.attributes[attribute] = value + @property + def has_power_sensors(self) -> bool: + """True if at least one power sensor is attached""" + return any([s.measures_power for s in self.sensors]) + + @property + def has_energy_sensors(self) -> bool: + """True if at least one power energy is attached""" + return any([s.measures_energy for s in self.sensors]) + def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset: """Create a GenericAsset and assigns it an id. @@ -150,3 +183,37 @@ def create_generic_asset(generic_asset_type: str, **kwargs) -> GenericAsset: db.session.add(new_generic_asset) db.session.flush() # generates the pkey for new_generic_asset return new_generic_asset + + +def assets_share_location(assets: List[GenericAsset]) -> bool: + """ + Return True if all assets in this list are located on the same spot. + TODO: In the future, we might soften this to compare if assets are in the same "housing" or "site". 
+ """ + if not assets: + return True + return all([a.location == assets[0].location for a in assets]) + + +def get_center_location_of_assets( + db: Session, user: Optional[User] +) -> Tuple[float, float]: + """ + Find the center position between all generic assets of the user's account. + """ + query = ( + "Select (min(latitude) + max(latitude)) / 2 as latitude," + " (min(longitude) + max(longitude)) / 2 as longitude" + " from generic_asset" + ) + if user is None: + user = current_user + query += f" where generic_asset.account_id = {user.account_id}" + locations: List[Row] = db.session.execute(query + ";").fetchall() + if ( + len(locations) == 0 + or locations[0].latitude is None + or locations[0].longitude is None + ): + return 52.366, 4.904 # Amsterdam, NL + return locations[0].latitude, locations[0].longitude diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 950a8ad67..83ae79641 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -9,6 +9,7 @@ import timely_beliefs as tb import timely_beliefs.utils as tb_utils +from flexmeasures.auth.policy import AuthModelMixin from flexmeasures.data.config import db from flexmeasures.data.queries.utils import ( create_beliefs_query, @@ -20,6 +21,7 @@ aggregate_values, ) from flexmeasures.utils.entity_address_utils import build_entity_address +from flexmeasures.utils.unit_utils import is_energy_unit, is_power_unit from flexmeasures.data.models.charts import chart_type_to_chart_specs from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.generic_assets import GenericAsset @@ -28,7 +30,7 @@ from flexmeasures.utils.flexmeasures_inflection import capitalize -class Sensor(db.Model, tb.SensorDBMixin): +class Sensor(db.Model, tb.SensorDBMixin, AuthModelMixin): """A sensor measures events. """ attributes = db.Column(MutableDict.as_mutable(db.JSON), nullable=False, default={}) @@ -67,6 +69,18 @@ def __init__( kwargs["attributes"] = attributes db.Model.__init__(self, **kwargs) + def __acl__(self): + """ + Within same account, everyone can read and update. + Creation and deletion are left to site admins in CLI. + + TODO: needs an iteration + """ + return { + "read": f"account:{self.generic_asset.account_id}", + "update": f"account:{self.generic_asset.account_id}", + } + @property def entity_address(self) -> str: return build_entity_address(dict(sensor_id=self.id), "sensor") @@ -77,6 +91,16 @@ def location(self) -> Optional[Tuple[float, float]]: if None not in location: return location + @property + def measures_power(self) -> bool: + """True if this sensor's unit is measuring power""" + return is_power_unit(self.unit) + + @property + def measures_energy(self) -> bool: + """True if this sensor's unit is measuring energy""" + return is_energy_unit(self.unit) + @property def is_strictly_non_positive(self) -> bool: """Return True if this sensor strictly records non-positive values.""" @@ -458,6 +482,8 @@ class TimedValue(object): """ A mixin of all tables that store time series data, either forecasts or measurements. Represents one row. 
+ + Note: This will be deprecated in favour of Timely-Beliefs - based code (see Sensor/TimedBelief) """ @declared_attr diff --git a/flexmeasures/data/queries/generic_assets.py b/flexmeasures/data/queries/generic_assets.py new file mode 100644 index 000000000..c3058e811 --- /dev/null +++ b/flexmeasures/data/queries/generic_assets.py @@ -0,0 +1,20 @@ +from typing import List, Union + +from sqlalchemy.orm import Query + +from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType + + +def query_assets_by_type(type_names: Union[List[str], str]) -> Query: + """ + Return a query which looks for GenericAssets by their type. + Pass in a list of type names or only one type name. + """ + query = GenericAsset.query.join(GenericAssetType).filter( + GenericAsset.generic_asset_type_id == GenericAssetType.id + ) + if isinstance(type_names, str): + query = query.filter(GenericAssetType.name == type_names) + else: + query = query.filter(GenericAssetType.name.in_(type_names)) + return query diff --git a/flexmeasures/data/services/asset_grouping.py b/flexmeasures/data/services/asset_grouping.py new file mode 100644 index 000000000..fb9e6ce25 --- /dev/null +++ b/flexmeasures/data/services/asset_grouping.py @@ -0,0 +1,222 @@ +""" +Convenience functions and class for accessing generic assets in groups. +For example, group by asset type or by location. +""" + +from __future__ import annotations +from typing import List, Dict, Optional, Union +import inflect +from itertools import groupby + +from sqlalchemy.orm import Query +from flask_security import current_user +from werkzeug.exceptions import Forbidden +from flexmeasures.auth.policy import ADMIN_ROLE, ADMIN_READER_ROLE + +from flexmeasures.utils.flexmeasures_inflection import parameterize, pluralize +from flexmeasures.data.models.generic_assets import ( + GenericAssetType, + GenericAsset, + assets_share_location, +) +from flexmeasures.data.queries.generic_assets import query_assets_by_type + +p = inflect.engine() + + +def get_asset_group_queries( + custom_additional_groups: Optional[Dict[str, List[str]]] = None, + group_by_location: bool = False, +) -> Dict[str, Query]: + """ + An asset group is defined by Asset queries. Each query has a name, and we prefer pluralised names. + They still need an executive call, like all(), count() or first(). + + This function limits the assets to be queried to the current user's account, + if the user is not an admin. + + Note: Make sure the current user has the "read" permission on his account (on GenericAsset.__class__?? See https://github.com/FlexMeasures/flexmeasures/issues/200). + + :param custom_additional_groups: dict of asset type groupings (mapping group names to names of asset types). See also the setting FLEXMEASURES_ASSET_TYPE_GROUPS. + :param group_by_location: If True, groups will be made for assets at the same location. Naming of the location currently supports charge points (for EVSEs). + """ + asset_queries = {} + + # 1. Custom asset groups by combinations of asset types + if custom_additional_groups: + for asset_type_group_name, asset_types in custom_additional_groups.items(): + asset_queries[asset_type_group_name] = query_assets_by_type(asset_types) + + # 2. We also include a group per asset type - using the pluralised asset type name + for asset_type in GenericAssetType.query.all(): + asset_queries[pluralize(asset_type.name)] = query_assets_by_type( + asset_type.name + ) + + # 3. 
Finally, we group assets by location + if group_by_location: + asset_queries.update(get_location_queries()) + + if not ( + current_user.has_role(ADMIN_ROLE) or current_user.has_role(ADMIN_READER_ROLE) + ): + # only current user's account + asset_queries = limit_assets_to_account(asset_queries) + + return asset_queries + + +def get_location_queries() -> Dict[str, Query]: + """ + Make queries for grouping assets by location. + + We group EVSE assets by location (if they share a location, they belong to the same Charge Point) + Like get_asset_group_queries, the values in the returned dict still need an executive call, like all(), count() or first(). Note that this function will still load and inspect assets to do its job. + + The Charge Points are named on the basis of the first EVSE in their list, + using either the whole EVSE name or that part that comes before a " -" delimiter. For example: + If: + evse_name = "Seoul Hilton - charger 1" + Then: + charge_point_name = "Seoul Hilton (Charge Point)" + + A Charge Point is a special case. If all assets on a location are of type EVSE, + we can call the location a "Charge Point". + """ + asset_queries = {} + all_assets = GenericAsset.query.all() + loc_groups = group_assets_by_location(all_assets) + for loc_group in loc_groups: + if len(loc_group) == 1: + continue + location_type = "(Location)" + if all( + [ + asset.asset_type.name in ["one-way_evse", "two-way_evse"] + for asset in loc_group + ] + ): + location_type = "(Charge Point)" + location_name = f"{loc_group[0].name.split(' -')[0]} {location_type}" + asset_queries[location_name] = GenericAsset.query.filter( + GenericAsset.name.in_([asset.name for asset in loc_group]) + ) + return asset_queries + + +def limit_assets_to_account( + asset_queries: Union[Query, Dict[str, Query]] +) -> Union[Query, Dict[str, Query]]: + """Filter out any assets that are not in the user's account.""" + if not hasattr(current_user, "account_id"): + raise Forbidden("Unauthenticated user cannot list asset groups.") + if isinstance(asset_queries, dict): + for name, query in asset_queries.items(): + asset_queries[name] = query.filter( + GenericAsset.account_id == current_user.account.id + ) + else: + asset_queries = asset_queries.filter( + GenericAsset.account_id == current_user.account_id + ) + return asset_queries + + +class AssetGroup: + """ + This class represents a group of assets of the same type, offering some convenience functions + for displaying their properties. + + When initialised with an asset type name, the group will contain all assets of + the given type that are accessible to the current user's account. + + When initialised with a query for GenericAssets, as well, the group will list the assets returned by that query. This can be useful in combination with get_asset_group_queries, + see above. + + TODO: On a conceptual level, we can model two functionally useful ways of grouping assets: + - AggregatedAsset if it groups assets of only 1 type, + - GeneralizedAsset if it groups assets of multiple types + There might be specialised subclasses, as well, for certain groups, like a market and consumers. + """ + + name: str + assets: List[GenericAsset] + count: int + unique_asset_types: List[GenericAssetType] + unique_asset_type_names: List[str] + + def __init__(self, name: str, asset_query: Optional[Query] = None): + """ The asset group name is either the name of an asset group or an individual asset. 
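+
+        Examples (asset and type names are illustrative):
+            AssetGroup("My house")  # the asset(s) named "My house"
+            AssetGroup("renewables", asset_query=query_assets_by_type(["solar", "wind"]))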
""" + if name is None or name == "": + raise Exception("Empty asset (group) name passed (%s)" % name) + self.name = name + + if not asset_query: + asset_query = GenericAsset.query.filter_by(name=self.name) + + # List unique asset types and asset type names represented by this group + self.assets = asset_query.all() + self.unique_asset_types = list(set([a.asset_type for a in self.assets])) + self.unique_asset_type_names = list( + set([a.asset_type.name for a in self.assets]) + ) + + # Count all assets that are identified by this group's name + self.count = len(self.assets) + + @property + def is_unique_asset(self) -> bool: + """Determines whether the resource represents a unique asset.""" + return [self.name] == [a.name for a in self.assets] + + @property + def display_name(self) -> str: + """Attempt to get a beautiful name to show if possible.""" + if self.is_unique_asset: + return self.assets[0].name + return self.name + + def is_eligible_for_comparing_individual_traces(self, max_traces: int = 7) -> bool: + """ + Decide whether comparing individual traces for assets in this resource + is a useful feature. + The number of assets that can be compared is parametrizable with max_traces. + Plot colors are reused if max_traces > 7, and run out if max_traces > 105. + """ + return len(self.assets) in range(2, max_traces + 1) and assets_share_location( + self.assets + ) + + @property + def hover_label(self) -> Optional[str]: + """Attempt to get a hover label to show if possible.""" + label = p.join( + [ + asset_type.description + for asset_type in self.unique_asset_types + if asset_type.description is not None + ] + ) + return label if label else None + + @property + def parameterized_name(self) -> str: + """Get a parametrized name for use in javascript.""" + return parameterize(self.name) + + def __str__(self): + return self.display_name + + +def group_assets_by_location( + asset_list: List[GenericAsset], +) -> List[List[GenericAsset]]: + groups = [] + + def key_function(x): + return x.location if x.location else () + + sorted_asset_list = sorted(asset_list, key=key_function) + for _k, g in groupby(sorted_asset_list, key=key_function): + groups.append(list(g)) + return groups diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index 4f68f8593..d4a7f8247 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -1,5 +1,10 @@ """ Generic services for accessing asset data. + +TODO: This works with the legacy data model (esp. Assets), so it is marked for deprecation. + We are building data.services.asset_grouping, porting much of the code here. + The data access logic here might also be useful for sensor data access logic we'll build + elsewhere, but that's not quite certain at this point in time. 
""" from __future__ import annotations diff --git a/flexmeasures/ui/charts/latest_state.py b/flexmeasures/ui/charts/latest_state.py new file mode 100644 index 000000000..8422c5e57 --- /dev/null +++ b/flexmeasures/ui/charts/latest_state.py @@ -0,0 +1,115 @@ +from typing import Tuple +from datetime import datetime + +from flask import current_app +from colour import Color +import pandas as pd +import pandas_bokeh +from bokeh.models import BoxAnnotation + + +from flexmeasures.data.services.time_series import convert_query_window_for_demo +from flexmeasures.utils.time_utils import ( + server_now, + localized_datetime_str, +) +from flexmeasures.data.models.time_series import Sensor + + +def get_latest_power_as_plot(sensor: Sensor, small: bool = False) -> Tuple[str, str]: + """ + Create a plot of a sensor's latest power measurement as an embeddable html string (incl. javascript). + First returned string is the measurement time, second string is the html string. + + Assumes that the sensor has the "capacity_in_mw" attribute. + + TODO: move to Altair. + """ + + if current_app.config.get("FLEXMEASURES_MODE", "") == "play": + before = None # type:ignore + else: + before = server_now() + _, before = convert_query_window_for_demo((before, before)) + + latest_power = sensor.latest_state() + if not latest_power.empty: + # TODO: Get first entry + latest_power_value = latest_power.event_value + if current_app.config.get("FLEXMEASURES_MODE", "") == "demo": + latest_power_datetime = latest_power.belief_time.replace( + year=datetime.now().year + ) + else: + latest_power_datetime = latest_power.belief_time + latest_measurement_time_str = localized_datetime_str( + latest_power_datetime + sensor.event_resolution + ) + else: + latest_power_value = 0 + latest_measurement_time_str = "time unknown" + if latest_power_value < 0: + consumption = True + latest_power_value *= -1 + else: + consumption = False + capacity_in_mw = sensor.get_attribute("capacity_in_mw") + data = { + latest_measurement_time_str if not small else "": [0], + "Capacity in use": [latest_power_value], + "Remaining capacity": [capacity_in_mw - latest_power_value], + } + percentage_capacity = latest_power_value / capacity_in_mw + df = pd.DataFrame(data) + p = df.plot_bokeh( + kind="bar", + x=latest_measurement_time_str if not small else "", + y=["Capacity in use", "Remaining capacity"], + stacked=True, + colormap=[ + "%s" + % Color( + hue=0.3 * min(1.0, 3 / 2 * percentage_capacity), + saturation=1, + luminance=min(0.5, 1 - percentage_capacity * 3 / 4), + ).get_hex_l(), # 0% red, 38% yellow, 67% green, >67% darker green + "#f7ebe7", + ], + alpha=0.7, + title=None, + xlabel=None, + ylabel="Power (%s)" % sensor.unit, + zooming=False, + show_figure=False, + hovertool=None, + legend=None, + toolbar_location=None, + figsize=(200, 400) if not small else (100, 100), + ylim=(0, capacity_in_mw), + xlim=(-0.5, 0.5), + ) + p.xgrid.visible = False + for r in p.renderers: + try: + r.glyph.width = 1 + except AttributeError: + pass + p.xaxis.ticker = [] + p.add_layout(BoxAnnotation(bottom=0, top=capacity_in_mw, fill_color="#f7ebe7")) + plot_html_str = pandas_bokeh.embedded_html(p) + hover_tool_str = "%s at %s %s (%s%% capacity).\nLatest state at %s." % ( + "Consuming" + if consumption + else "Running" + if latest_power_value == 0 + else "Producing", + round(latest_power_value, 3), + sensor.unit, + round(100 * percentage_capacity), + latest_measurement_time_str, + ) + return ( + latest_measurement_time_str, + """
<div data-toggle="tooltip" data-placement="bottom" title="%s">%s</div>
""" + % (hover_tool_str, plot_html_str), + ) diff --git a/flexmeasures/ui/templates/base.html b/flexmeasures/ui/templates/base.html index 7e8cf6d36..335f5546c 100644 --- a/flexmeasures/ui/templates/base.html +++ b/flexmeasures/ui/templates/base.html @@ -255,6 +255,75 @@ . {% endblock copyright_notice %} + {% block about %} + About FlexMeasures. + + + + {% endblock about %} + {% block credits %} Credits. diff --git a/flexmeasures/ui/templates/views/new_dashboard.html b/flexmeasures/ui/templates/views/new_dashboard.html new file mode 100644 index 000000000..b9e2e747b --- /dev/null +++ b/flexmeasures/ui/templates/views/new_dashboard.html @@ -0,0 +1,224 @@ +{% extends "base.html" %} + +{% set active_page = "dashboard" %} + +{% block title %} Dashboard {% endblock %} + +{% block divs %} + +
+    {# table markup not recoverable; the template logic: #}
+    {% if user_is_admin %}{{ FLEXMEASURES_PLATFORM_NAME }} ― asset status:
+    {% else %}Status of my assets:{% endif %}
+    {% for asset_group_name in asset_groups if asset_groups[asset_group_name].count > 0 %}
+      {# On demo, show all non-empty groups, otherwise show all groups that are non-empty for the current user #}
+      {{ asset_group_name | capitalize }}
+    {% endfor %}
+    {% if not user_is_admin %}
+      My assets:
+      {% for asset_group_name in asset_groups if asset_groups[asset_group_name].count > 0 %}
+        {{ asset_groups[asset_group_name].count }}
+      {% endfor %}
+    {% endif %}
+    {% if user_is_admin or FLEXMEASURES_MODE == "demo" %}
+      {{ FLEXMEASURES_PLATFORM_NAME }} total:
+      {% for asset_group_name in asset_groups if asset_groups[asset_group_name].count > 0 %}
+        {{ asset_groups[asset_group_name].count }}
+      {% endfor %}
+    {% endif %}
+ + + + + + + + +{{ bokeh_html_embedded | safe }} + + + +{% endblock %} diff --git a/flexmeasures/ui/views/__init__.py b/flexmeasures/ui/views/__init__.py index d427aca78..4614f48f9 100644 --- a/flexmeasures/ui/views/__init__.py +++ b/flexmeasures/ui/views/__init__.py @@ -3,11 +3,14 @@ from flexmeasures.ui import flexmeasures_ui # Now views can register -from flexmeasures.ui.views.dashboard import dashboard_view # noqa: F401 +from flexmeasures.ui.views.dashboard import ( # noqa: F401 + dashboard_view as legacy_dashboard_view, +) +from flexmeasures.ui.views.new_dashboard import new_dashboard_view # noqa: F401 from flexmeasures.ui.views.portfolio import portfolio_view # noqa: F401 from flexmeasures.ui.views.control import control_view # noqa: F401 from flexmeasures.ui.views.analytics import analytics_view # noqa: F401 -from flexmeasures.ui.views.state import state_view # noqa: F401 +from flexmeasures.ui.views.state import sensor_state_view # noqa: F401 from flexmeasures.ui.views.logged_in_user import ( # noqa: F401 # noqa: F401 logged_in_user_view, diff --git a/flexmeasures/ui/views/dashboard.py b/flexmeasures/ui/views/dashboard.py index fbcb553f5..51a881b81 100644 --- a/flexmeasures/ui/views/dashboard.py +++ b/flexmeasures/ui/views/dashboard.py @@ -12,9 +12,12 @@ get_center_location, ) +""" +Note: This view is deprecated. +""" -# Dashboard (default root view, see utils/app_utils.py) -@flexmeasures_ui.route("/dashboard") + +@flexmeasures_ui.route("/legacy-dashboard") @login_required def dashboard_view(): """Dashboard view. diff --git a/flexmeasures/ui/views/new_dashboard.py b/flexmeasures/ui/views/new_dashboard.py new file mode 100644 index 000000000..90623e8b7 --- /dev/null +++ b/flexmeasures/ui/views/new_dashboard.py @@ -0,0 +1,65 @@ +from flask import request, current_app +from flask_security import login_required +from flask_security.core import current_user +from bokeh.resources import CDN + +from flexmeasures.data.config import db +from flexmeasures.ui.views import flexmeasures_ui +from flexmeasures.ui.utils.view_utils import render_flexmeasures_template, clear_session +from flexmeasures.data.models.generic_assets import get_center_location_of_assets +from flexmeasures.data.services.asset_grouping import ( + AssetGroup, + get_asset_group_queries, +) + + +# Dashboard (default root view, see utils/app_utils.py) +@flexmeasures_ui.route("/dashboard") +@login_required +def new_dashboard_view(): + """Dashboard view. + This is the default landing page. + It shows a map with the location of all of the assets in the user's account, + as well as a breakdown of the asset types. + Here, we are only interested in showing assets with power sensors. + Admins get to see all assets. + + TODO: Assets for which the platform has identified upcoming balancing opportunities are highlighted. + """ + msg = "" + if "clear-session" in request.values: + clear_session() + msg = "Your session was cleared." 
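+    # Build one query per asset group: custom groups from config first,
+    # then one group per asset type. Only groups with at least one locatable
+    # asset that records power or energy end up on the dashboard map.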
+
+    aggregate_groups = current_app.config.get("FLEXMEASURES_ASSET_TYPE_GROUPS", {})
+    asset_groups = get_asset_group_queries(custom_additional_groups=aggregate_groups)
+
+    map_asset_groups = {}
+    for asset_group_name, asset_group_query in asset_groups.items():
+        asset_group = AssetGroup(asset_group_name, asset_query=asset_group_query)
+        if any(
+            [
+                a.location and (a.has_power_sensors or a.has_energy_sensors)
+                for a in asset_group.assets
+            ]
+        ):
+            map_asset_groups[asset_group_name] = asset_group
+
+    # Pack CDN resources (from pandas_bokeh/base.py)
+    bokeh_html_embedded = ""
+    for css in CDN.css_files:
+        bokeh_html_embedded += (
+            """<link href="%s" rel="stylesheet" type="text/css">\n""" % css
+        )
+    for js in CDN.js_files:
+        bokeh_html_embedded += """<script src="%s"></script>\n""" % js
+
+    return render_flexmeasures_template(
+        "views/new_dashboard.html",
+        message=msg,
+        bokeh_html_embedded=bokeh_html_embedded,
+        mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""),
+        map_center=get_center_location_of_assets(db, user=current_user),
+        asset_groups=map_asset_groups,
+        aggregate_groups=aggregate_groups,
+    )
diff --git a/flexmeasures/ui/views/state.py b/flexmeasures/ui/views/state.py
index 9c1e710fb..b49510f36 100644
--- a/flexmeasures/ui/views/state.py
+++ b/flexmeasures/ui/views/state.py
@@ -1,33 +1,90 @@
+from typing import Optional
+
 from flask import request
 from flask_security import auth_required
+from webargs.flaskparser import use_kwargs
 
+from flexmeasures.auth.decorators import permission_required_for_context
 from flexmeasures.data.models.assets import Asset
+from flexmeasures.data.models.generic_assets import GenericAsset
+from flexmeasures.data.models.time_series import Sensor
 from flexmeasures.data.services.resources import can_access_asset
-from flexmeasures.ui.utils.plotting_utils import get_latest_power_as_plot
+from flexmeasures.api.common.schemas.sensors import SensorIdField
+from flexmeasures.api.common.schemas.generic_assets import AssetIdField
+from flexmeasures.ui.utils.plotting_utils import (
+    get_latest_power_as_plot as legacy_get_latest_power_as_plot,
+)
+from flexmeasures.utils.unit_utils import is_power_unit
+from flexmeasures.ui.charts.latest_state import get_latest_power_as_plot
 from flexmeasures.ui.views import flexmeasures_ui
 
 
+@flexmeasures_ui.route("/asset/<id>/state/")
+@use_kwargs({"asset": AssetIdField(data_key="id")}, location="path")
+@permission_required_for_context("read", arg_name="asset")
+def asset_state_view(id: str, asset: GenericAsset):
+    """Asset state view.
+    This returns a little html snippet with a plot of the most recent state of the
+    first found power sensor.
+    TODO: no need to only support power plots in the future. Might make it optional.
+    """
+    power_sensor: Optional[Sensor] = None
+    for sensor in asset.sensors:
+        if is_power_unit(sensor.unit):
+            power_sensor = sensor
+            break
+    if power_sensor is None:
+        return (
+            """<p>Asset %s has no power sensor.</p>"""
+            % id
+        )
+    time_str, plot_html_str = get_latest_power_as_plot(power_sensor, small=True)
+    return plot_html_str
+
+
+@flexmeasures_ui.route("/sensor/<id>/state")
+@use_kwargs({"sensor": SensorIdField(data_key="id")}, location="path")
+@permission_required_for_context("read", arg_name="sensor")
+def sensor_state_view(id: int, sensor: Sensor):
+    """Sensor state view.
+    This returns a little html snippet with a plot of the most recent state of the sensor.
+    Only works for power sensors at the moment.
+    TODO: no need to only support power plots in the future. Might make it optional.
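+
+    Example request (hypothetical sensor id):
+        GET /sensor/42/state
+    responds with the plot snippet if sensor 42 records a power unit (e.g. "kW").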
+ """ + if not is_power_unit(sensor.unit): + return """"""" + time_str, plot_html_str = get_latest_power_as_plot(sensor, small=True) + return plot_html_str + + @flexmeasures_ui.route("/state") @auth_required() def state_view(): """State view. This returns a little html snippet with a plot of the most recent state of the asset. + + TODO: This is legacy ― it uses the old database model. """ asset_id = request.args.get("id") try: int(asset_id) except ValueError: - return """"""" + # TODO: try Sensor, then Asset (legacy)? asset = Asset.query.filter(Asset.id == asset_id).one_or_none() if asset is None: return """"""" if not can_access_asset(asset): - return """"""" - time_str, plot_html_str = get_latest_power_as_plot(asset, small=True) + time_str, plot_html_str = legacy_get_latest_power_as_plot(asset, small=True) return plot_html_str diff --git a/flexmeasures/utils/config_defaults.py b/flexmeasures/utils/config_defaults.py index 17415c9d7..aa122f163 100644 --- a/flexmeasures/utils/config_defaults.py +++ b/flexmeasures/utils/config_defaults.py @@ -109,6 +109,10 @@ class Config(object): ] FLEXMEASURES_MENU_LISTED_VIEW_ICONS: Dict[str, str] = {} FLEXMEASURES_MENU_LISTED_VIEW_TITLES: Dict[str, str] = {} + FLEXMEASURES_ASSET_TYPE_GROUPS = { + "renewables": ["solar", "wind"], + "EVSE": ["one-way_evse", "two-way_evse"], + } # how to group assets by asset types FLEXMEASURES_LP_SOLVER: str = "cbc" FLEXMEASURES_JOB_TTL: timedelta = timedelta(days=1) FLEXMEASURES_PLANNING_HORIZON: timedelta = timedelta(hours=2 * 24) diff --git a/flexmeasures/utils/error_utils.py b/flexmeasures/utils/error_utils.py index 27c53c612..8fa2ad728 100644 --- a/flexmeasures/utils/error_utils.py +++ b/flexmeasures/utils/error_utils.py @@ -63,7 +63,7 @@ def error_handling_router(error: HTTPException): if hasattr(error, "code"): try: http_error_code = int(error.code) - except ValueError: + except (ValueError, TypeError): # if code is not an int or None pass error_text = getattr( diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index d4e46b53d..48c4523e3 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -20,3 +20,23 @@ def determine_stock_unit(flow_unit: str, time_unit: str = "h"): if flow_unit.endswith(f"/{time_unit}") else f"{flow_unit}{time_unit}" ) + + +def is_power_unit(unit: str) -> bool: + """For example: + >>> is_power_unit("kW") # True + >>> is_power_unit("°C") # False + >>> is_power_unit("kWh") # False + >>> is_power_unit("EUR/kWh") # False + """ + return unit in ("W", "kW", "MW") + + +def is_energy_unit(unit: str) -> bool: + """For example: + >>> is_power_unit("kW") # False + >>> is_power_unit("°C") # False + >>> is_power_unit("kWh") # True + >>> is_power_unit("EUR/kWh") # False + """ + return unit in ("Wh", "kWh", "MWh") diff --git a/requirements/app.txt b/requirements/app.txt index 0bb93b3de..3b2eaa204 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -21,6 +21,10 @@ attrs==21.2.0 # jsonschema # outcome # trio +backports.zoneinfo==0.2.1 + # via + # pytz-deprecation-shim + # tzlocal bcrypt==3.2.0 # via -r requirements/app.in beautifulsoup4==4.10.0 @@ -136,7 +140,10 @@ idna==3.3 importlib-metadata==4.8.1 # via # -r requirements/app.in + # alembic # timely-beliefs +importlib-resources==5.4.0 + # via alembic inflect==5.3.0 # via -r requirements/app.in inflection==0.5.1 @@ -383,7 +390,9 @@ wtforms==2.3.3 xlrd==2.0.1 # via -r requirements/app.in zipp==3.6.0 - # via importlib-metadata + # via + # importlib-metadata + # importlib-resources # The 
following packages are considered to be unsafe in a requirements file: # setuptools From 8f78b20e0be726f78fca8815c1aaa9840c466dbe Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Thu, 23 Dec 2021 23:16:33 +0100 Subject: [PATCH 20/46] Sub issue 284a initialize timed belief when initializing power/price/weather (#285) Ensure that new data is saved in both the old (Power/Price/Weather) and the new model (TimedBelief). Resolve interference between some tests using day-ahead price data by adjusting the data source of the price data for tests using the new model. * Initialize TimedBelief when initializing Weather Signed-off-by: F.N. Claessen * Add deprecation notice for Weather class Signed-off-by: F.N. Claessen * Initialize TimedBelief when initializing Price, and add deprecation notice for Price class Signed-off-by: F.N. Claessen * Fix test data periodicity Signed-off-by: F.N. Claessen * Resolve test interference by using a different data source in conftest Signed-off-by: F.N. Claessen * Initialize TimedBelief when initializing Power, and add deprecation notice for Power class Signed-off-by: F.N. Claessen * Initialize TimedBelief when initializing forecasts Signed-off-by: F.N. Claessen * Update incorrect test: a shorter scheduling horizon may lead to a different result Signed-off-by: F.N. Claessen --- flexmeasures/api/v1/implementations.py | 11 ++-- flexmeasures/api/v1/tests/conftest.py | 22 ++++--- flexmeasures/api/v1_1/implementations.py | 22 ++++--- flexmeasures/api/v1_1/tests/conftest.py | 33 +++++----- flexmeasures/api/v1_2/tests/test_api_v1_2.py | 8 +-- .../api/v2_0/implementations/sensors.py | 35 +++++----- flexmeasures/conftest.py | 64 +++++++++++-------- flexmeasures/data/models/assets.py | 28 +++++++- flexmeasures/data/models/markets.py | 26 +++++++- flexmeasures/data/models/weather.py | 26 +++++++- flexmeasures/data/scripts/data_gen.py | 44 +++++++------ flexmeasures/data/scripts/grid_weather.py | 16 +++-- flexmeasures/data/services/forecasting.py | 11 ++-- flexmeasures/data/services/scheduling.py | 11 ++-- flexmeasures/data/tests/conftest.py | 22 ++++--- flexmeasures/data/tests/test_queries.py | 12 ++-- .../data/tests/test_time_series_services.py | 4 +- 17 files changed, 247 insertions(+), 148 deletions(-) diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index d8ebe5f43..1bbc3de14 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -301,12 +301,13 @@ def create_connection_and_value_groups( # noqa: C901 (start + duration) - (dt + duration / len(value_group)) ) p = Power( - datetime=dt, - value=value + use_legacy_kwargs=False, + event_start=dt, + event_value=value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption - horizon=h, - sensor_id=sensor_id, - data_source_id=data_source.id, + belief_horizon=h, + sensor=sensor, + source=data_source, ) power_measurements.append(p) diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 88e46e800..7803e7088 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -91,20 +91,22 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices meter_data = [] for i in range(6): p_1 = Power( - datetime=isodate.parse_datetime("2015-01-01T00:00:00Z") + use_legacy_kwargs=False, + event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), - 
horizon=timedelta(0), - value=(100.0 + i) * -1, - sensor_id=cs_5.id, - data_source_id=user1_data_source.id, + belief_horizon=timedelta(0), + event_value=(100.0 + i) * -1, + sensor=cs_5.corresponding_sensor, + source=user1_data_source, ) p_2 = Power( - datetime=isodate.parse_datetime("2015-01-01T00:00:00Z") + use_legacy_kwargs=False, + event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), - horizon=timedelta(hours=0), - value=(1000.0 - 10 * i) * -1, - sensor_id=cs_5.id, - data_source_id=user2_data_source.id, + belief_horizon=timedelta(hours=0), + event_value=(1000.0 - 10 * i) * -1, + sensor=cs_5.corresponding_sensor, + source=user2_data_source, ) meter_data.append(p_1) meter_data.append(p_2) diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index baec6e3e9..016bcec45 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -114,11 +114,12 @@ def post_price_data_response( (start + duration) - (dt + duration / len(value_group)) ) p = Price( - datetime=dt, - value=value, - horizon=h, - sensor_id=sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=dt, + event_value=value, + belief_horizon=h, + sensor=sensor, + source=data_source, ) prices.append(p) @@ -198,11 +199,12 @@ def post_weather_data_response( # noqa: C901 (start + duration) - (dt + duration / len(value_group)) ) w = Weather( - datetime=dt, - value=value, - horizon=h, - sensor_id=sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=dt, + event_value=value, + belief_horizon=h, + sensor=sensor, + source=data_source, ) weather_measurements.append(w) diff --git a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index d826b3482..a63d4eb51 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -58,28 +58,31 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices power_forecasts = [] for i in range(6): p_1 = Power( - datetime=isodate.parse_datetime("2015-01-01T00:00:00Z") + use_legacy_kwargs=False, + event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), - horizon=timedelta(hours=6), - value=(300 + i) * -1, - sensor_id=cs_1.id, - data_source_id=data_source.id, + belief_horizon=timedelta(hours=6), + event_value=(300 + i) * -1, + sensor=cs_1.corresponding_sensor, + source=data_source, ) p_2 = Power( - datetime=isodate.parse_datetime("2015-01-01T00:00:00Z") + use_legacy_kwargs=False, + event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), - horizon=timedelta(hours=6), - value=(300 - i) * -1, - sensor_id=cs_2.id, - data_source_id=data_source.id, + belief_horizon=timedelta(hours=6), + event_value=(300 - i) * -1, + sensor=cs_2.corresponding_sensor, + source=data_source, ) p_3 = Power( - datetime=isodate.parse_datetime("2015-01-01T00:00:00Z") + use_legacy_kwargs=False, + event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), - horizon=timedelta(hours=6), - value=(0 + i) * -1, - sensor_id=cs_3.id, - data_source_id=data_source.id, + belief_horizon=timedelta(hours=6), + event_value=(0 + i) * -1, + sensor=cs_3.corresponding_sensor, + source=data_source, ) power_forecasts.append(p_1) power_forecasts.append(p_2) diff --git a/flexmeasures/api/v1_2/tests/test_api_v1_2.py b/flexmeasures/api/v1_2/tests/test_api_v1_2.py index 4d66b821b..442a2b77f 100644 --- 
a/flexmeasures/api/v1_2/tests/test_api_v1_2.py +++ b/flexmeasures/api/v1_2/tests/test_api_v1_2.py @@ -27,7 +27,8 @@ def test_get_device_message(client, message): assert get_device_message_response.json["type"] == "GetDeviceMessageResponse" assert len(get_device_message_response.json["values"]) == 192 - # Test that a shorter planning horizon yields the same result + # Test that a shorter planning horizon yields a shorter result + # Note that the scheduler might give a different result, because it doesn't look as far ahead message["duration"] = "PT6H" get_device_message_response_short = client.get( url_for("flexmeasures_api_v1_2.get_device_message"), @@ -36,10 +37,7 @@ def test_get_device_message(client, message): ) print("Server responded with:\n%s" % get_device_message_response_short.json) assert get_device_message_response_short.status_code == 200 - assert ( - get_device_message_response_short.json["values"] - == get_device_message_response.json["values"][0:24] - ) + assert len(get_device_message_response_short.json["values"]) == 24 # Test that a much longer planning horizon yields the same result (when there are only 2 days of prices) message["duration"] = "PT1000H" diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index 7ff25aa36..0264da331 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -99,11 +99,12 @@ def post_price_data_response( # noqa C901 prices.extend( [ Price( - datetime=event_start, - value=event_value, - horizon=belief_horizon, - sensor_id=sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, ) for event_start, event_value, belief_horizon in zip( event_starts, event_values, belief_horizons @@ -173,7 +174,7 @@ def post_weather_data_response( # noqa: C901 if unit not in accepted_units: return invalid_unit(weather_sensor_type_name, accepted_units) - sensor = get_sensor_by_generic_asset_type_and_location( + sensor: Sensor = get_sensor_by_generic_asset_type_and_location( weather_sensor_type_name, latitude, longitude ) @@ -186,11 +187,12 @@ def post_weather_data_response( # noqa: C901 weather_measurements.extend( [ Weather( - datetime=event_start, - value=event_value, - horizon=belief_horizon, - sensor_id=sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, ) for event_start, event_value, belief_horizon in zip( event_starts, event_values, belief_horizons @@ -354,12 +356,13 @@ def post_power_data( power_measurements.extend( [ Power( - datetime=event_start, - value=event_value + use_legacy_kwargs=False, + event_start=event_start, + event_value=event_value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption - horizon=belief_horizon, - sensor_id=sensor_id, - data_source_id=data_source.id, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, ) for event_start, event_value, belief_horizon in zip( event_starts, event_values, belief_horizons diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 4932ebbc4..393a863c6 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -242,9 +242,11 @@ def create_test_markets(db) -> Dict[str, Market]: @pytest.fixture(scope="module") def 
setup_sources(db) -> Dict[str, DataSource]: - data_source = DataSource(name="Seita", type="demo script") - db.session.add(data_source) - return {"Seita": data_source} + seita_source = DataSource(name="Seita", type="demo script") + db.session.add(seita_source) + entsoe_source = DataSource(name="ENTSO-E", type="demo script") + db.session.add(entsoe_source) + return {"Seita": seita_source, "ENTSO-E": entsoe_source} @pytest.fixture(scope="module") @@ -329,14 +331,18 @@ def setup_assets( time_slots = pd.date_range( datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T" ) - values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] + values = [ + random() * (1 + np.sin(x * 2 * np.pi / (4 * 24))) + for x in range(len(time_slots)) + ] for dt, val in zip(time_slots, values): p = Power( - datetime=as_server_time(dt), - horizon=parse_duration("PT0M"), - value=val, - data_source_id=setup_sources["Seita"].id, - asset_id=asset.id, + use_legacy_kwargs=False, + event_start=as_server_time(dt), + belief_horizon=parse_duration("PT0M"), + event_value=val, + sensor=asset.corresponding_sensor, + source=setup_sources["Seita"], ) db.session.add(p) return {asset.name: asset for asset in assets} @@ -351,21 +357,21 @@ def setup_beliefs(db: SQLAlchemy, setup_markets, setup_sources) -> int: beliefs = [ TimedBelief( sensor=sensor, - source=setup_sources["Seita"], + source=setup_sources["ENTSO-E"], event_value=21, event_start="2021-03-28 16:00+01", belief_horizon=timedelta(0), ), TimedBelief( sensor=sensor, - source=setup_sources["Seita"], + source=setup_sources["ENTSO-E"], event_value=21, event_start="2021-03-28 17:00+01", belief_horizon=timedelta(0), ), TimedBelief( sensor=sensor, - source=setup_sources["Seita"], + source=setup_sources["ENTSO-E"], event_value=20, event_start="2021-03-28 17:00+01", belief_horizon=timedelta(hours=2), @@ -373,7 +379,7 @@ def setup_beliefs(db: SQLAlchemy, setup_markets, setup_sources) -> int: ), TimedBelief( sensor=sensor, - source=setup_sources["Seita"], + source=setup_sources["ENTSO-E"], event_value=21, event_start="2021-03-28 17:00+01", belief_horizon=timedelta(hours=2), @@ -390,31 +396,35 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources # one day of test data (one complete sine curve) time_slots = pd.date_range( - datetime(2015, 1, 1), datetime(2015, 1, 2), freq="15T", closed="left" + datetime(2015, 1, 1), datetime(2015, 1, 2), freq="1H", closed="left" ) - values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] + values = [ + random() * (1 + np.sin(x * 2 * np.pi / 24)) for x in range(len(time_slots)) + ] for dt, val in zip(time_slots, values): p = Price( - datetime=as_server_time(dt), - horizon=timedelta(hours=0), - value=val, - data_source_id=setup_sources["Seita"].id, - market_id=setup_markets["epex_da"].id, + use_legacy_kwargs=False, + event_start=as_server_time(dt), + belief_horizon=timedelta(hours=0), + event_value=val, + source=setup_sources["Seita"], + sensor=setup_markets["epex_da"].corresponding_sensor, ) db.session.add(p) # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours) time_slots = pd.date_range( - datetime(2015, 1, 2), datetime(2015, 1, 3), freq="15T", closed="left" + datetime(2015, 1, 2), datetime(2015, 1, 3), freq="1H", closed="left" ) - values = [100] * 8 * 4 + [90] * 8 * 4 + [100] * 8 * 4 + values = [100] * 8 + [90] * 8 + [100] * 8 for dt, val in zip(time_slots, values): p = Price( - datetime=as_server_time(dt), - horizon=timedelta(hours=0), - 
value=val, - data_source_id=setup_sources["Seita"].id, - sensor_id=setup_markets["epex_da"].id, + use_legacy_kwargs=False, + event_start=as_server_time(dt), + belief_horizon=timedelta(hours=0), + event_value=val, + source=setup_sources["Seita"], + sensor=setup_markets["epex_da"].corresponding_sensor, ) db.session.add(p) diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 6ce3431c3..b089b1051 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -12,7 +12,7 @@ get_old_model_type, ) from flexmeasures.data.models.user import User -from flexmeasures.data.models.time_series import Sensor, TimedValue +from flexmeasures.data.models.time_series import Sensor, TimedValue, TimedBelief from flexmeasures.data.models.generic_assets import ( create_generic_asset, GenericAsset, @@ -84,7 +84,7 @@ def __repr__(self): class Asset(db.Model, tb.SensorDBMixin): - """Each asset is an energy- consuming or producing hardware. """ + """Each asset is an energy- consuming or producing hardware.""" id = db.Column( db.Integer, db.ForeignKey("sensor.id"), primary_key=True, autoincrement=True @@ -347,7 +347,7 @@ def to_dict(self): "horizon": self.horizon, } - def __init__(self, **kwargs): + def __init__(self, use_legacy_kwargs: bool = True, **kwargs): # todo: deprecate the 'asset_id' argument in favor of 'sensor_id' (announced v0.8.0) if "asset_id" in kwargs and "sensor_id" not in kwargs: kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( @@ -357,6 +357,28 @@ def __init__(self, **kwargs): None, ) kwargs.pop("asset_id", None) + + # todo: deprecate the 'Power' class in favor of 'TimedBelief' (announced v0.8.0) + if use_legacy_kwargs is False: + # Create corresponding TimedBelief + belief = TimedBelief(**kwargs) + db.session.add(belief) + + # Convert key names for legacy model + kwargs["value"] = kwargs.pop("event_value") + kwargs["datetime"] = kwargs.pop("event_start") + kwargs["horizon"] = kwargs.pop("belief_horizon") + kwargs["sensor_id"] = kwargs.pop("sensor").id + kwargs["data_source_id"] = kwargs.pop("source").id + + else: + import warnings + + warnings.warn( + f"The {self.__class__} class is deprecated. 
Switch to using the TimedBelief class to suppress this warning.", + FutureWarning, + ) + super(Power, self).__init__(**kwargs) def __repr__(self): diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index 83c1ac38f..c12cae9c6 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -15,7 +15,7 @@ copy_old_sensor_attributes, get_old_model_type, ) -from flexmeasures.data.models.time_series import Sensor, TimedValue +from flexmeasures.data.models.time_series import Sensor, TimedValue, TimedBelief from flexmeasures.utils.entity_address_utils import build_entity_address from flexmeasures.utils.flexmeasures_inflection import humanize @@ -203,7 +203,7 @@ def make_query(cls, **kwargs) -> Query: """Construct the database query.""" return super().make_query(**kwargs) - def __init__(self, **kwargs): + def __init__(self, use_legacy_kwargs: bool = True, **kwargs): # todo: deprecate the 'market_id' argument in favor of 'sensor_id' (announced v0.8.0) if "market_id" in kwargs and "sensor_id" not in kwargs: kwargs["sensor_id"] = tb_utils.replace_deprecated_argument( @@ -213,4 +213,26 @@ def __init__(self, **kwargs): None, ) kwargs.pop("market_id", None) + + # todo: deprecate the 'Price' class in favor of 'TimedBelief' (announced v0.8.0) + if use_legacy_kwargs is False: + # Create corresponding TimedBelief + belief = TimedBelief(**kwargs) + db.session.add(belief) + + # Convert key names for legacy model + kwargs["value"] = kwargs.pop("event_value") + kwargs["datetime"] = kwargs.pop("event_start") + kwargs["horizon"] = kwargs.pop("belief_horizon") + kwargs["sensor_id"] = kwargs.pop("sensor").id + kwargs["data_source_id"] = kwargs.pop("source").id + + else: + import warnings + + warnings.warn( + f"The {self.__class__} class is deprecated. Switch to using the TimedBelief class to suppress this warning.", + FutureWarning, + ) + super(Price, self).__init__(**kwargs) diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index d86fe2421..ee3ea53ee 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -11,7 +11,7 @@ copy_old_sensor_attributes, get_old_model_type, ) -from flexmeasures.data.models.time_series import Sensor, TimedValue +from flexmeasures.data.models.time_series import Sensor, TimedValue, TimedBelief from flexmeasures.data.models.generic_assets import ( create_generic_asset, GenericAsset, @@ -258,7 +258,29 @@ def make_query(cls, **kwargs) -> Query: """Construct the database query.""" return super().make_query(**kwargs) - def __init__(self, **kwargs): + def __init__(self, use_legacy_kwargs: bool = True, **kwargs): + + # todo: deprecate the 'Weather' class in favor of 'TimedBelief' (announced v0.8.0) + if use_legacy_kwargs is False: + + # Create corresponding TimedBelief + belief = TimedBelief(**kwargs) + db.session.add(belief) + + # Convert key names for legacy model + kwargs["value"] = kwargs.pop("event_value") + kwargs["datetime"] = kwargs.pop("event_start") + kwargs["horizon"] = kwargs.pop("belief_horizon") + kwargs["sensor_id"] = kwargs.pop("sensor").id + kwargs["data_source_id"] = kwargs.pop("source").id + else: + import warnings + + warnings.warn( + f"The {self.__class__} class is deprecated. 
Switch to using the TimedBelief class to suppress this warning.", + FutureWarning, + ) + super(Weather, self).__init__(**kwargs) def __repr__(self): diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index 2b1ecbc5b..a38cbed2a 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -162,11 +162,12 @@ def add_dummy_tou_market(db: SQLAlchemy): for year in range(2015, 2025): db.session.add( Price( - value=50, - datetime=datetime(year, 1, 1, tzinfo=pytz.utc), - horizon=timedelta(0), - data_source_id=source.id, - sensor_id=market.id, + use_legacy_kwargs=False, + event_value=50, + event_start=datetime(year, 1, 1, tzinfo=pytz.utc), + belief_horizon=timedelta(0), + source=source, + sensor=market.corresponding_sensor, ) ) @@ -321,33 +322,36 @@ def populate_time_series_forecasts( # noqa: C901 if isinstance(old_sensor, Asset): beliefs = [ Power( - datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), - horizon=horizon, - value=value, - asset_id=old_sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), + belief_horizon=horizon, + event_value=value, + sensor=old_sensor.corresponding_sensor, + source=data_source, ) for dt, value in forecasts.items() ] elif isinstance(old_sensor, Market): beliefs = [ Price( - datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), - horizon=horizon, - value=value, - sensor_id=old_sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), + belief_horizon=horizon, + event_value=value, + sensor=old_sensor.corresponding_sensor, + source=data_source, ) for dt, value in forecasts.items() ] elif isinstance(old_sensor, WeatherSensor): beliefs = [ Weather( - datetime=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), - horizon=horizon, - value=value, - sensor_id=old_sensor.id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), + belief_horizon=horizon, + event_value=value, + sensor=old_sensor.corresponding_sensor, + source=data_source, ) for dt, value in forecasts.items() ] diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index fa7e1484d..ace75c637 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -1,7 +1,7 @@ #!/usr/bin/env python import os -from typing import Tuple, List, Dict +from typing import Tuple, List, Dict, Optional import json from datetime import datetime @@ -17,6 +17,7 @@ from flexmeasures.data.transactional import task_with_status_report from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import Sensor FILE_PATH_LOCATION = "/../raw_data/weather-forecasts" DATA_SOURCE_NAME = "OpenWeatherMap" @@ -382,7 +383,7 @@ def save_forecasts_in_db( if needed_response_label in fc: weather_sensor = weather_sensors.get(flexmeasures_sensor_type, None) if weather_sensor is None: - weather_sensor = find_closest_sensor( + weather_sensor: Optional[Sensor] = find_closest_sensor( flexmeasures_sensor_type, lat=location[0], lng=location[1] ) if weather_sensor is not None: @@ -416,11 +417,12 @@ def save_forecasts_in_db( db_forecasts.append( Weather( - datetime=fc_datetime, - horizon=fc_horizon, - value=fc_value, - sensor_id=weather_sensor.id, - 
data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=fc_datetime, + belief_horizon=fc_horizon, + event_value=fc_value, + sensor=weather_sensor, + source=data_source, ) ) else: diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index b73fbc1e8..1502bada1 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -246,11 +246,12 @@ def make_rolling_viewpoint_forecasts( ts_value_forecasts = [ timed_value_type( - datetime=dt, - horizon=horizon, - value=value, - sensor_id=old_sensor_id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=dt, + belief_horizon=horizon, + event_value=value, + sensor=sensor, + source=data_source, ) for dt, value in forecasts.items() ] diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index c07bcde7b..10928f2ae 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -146,11 +146,12 @@ def make_schedule( ts_value_schedule = [ Power( - datetime=dt, - horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), - value=-value, - sensor_id=asset_id, - data_source_id=data_source.id, + use_legacy_kwargs=False, + event_start=dt, + belief_horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), + event_value=-value, + sensor=sensor, + source=data_source, ) for dt, value in consumption_schedule.items() ] # For consumption schedules, positive values denote consumption. For the db, consumption is negative diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index 87191dfe8..c60816729 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -76,11 +76,12 @@ def setup_fresh_test_data( values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] for dt, val in zip(time_slots, values): p = Power( - datetime=as_server_time(dt), - horizon=parse_duration("PT0M"), - value=val, - data_source_id=data_source.id, - sensor_id=asset.id, + use_legacy_kwargs=False, + event_start=as_server_time(dt), + belief_horizon=parse_duration("PT0M"), + event_value=val, + sensor=asset.corresponding_sensor, + source=data_source, ) db.session.add(p) add_test_weather_sensor_and_forecasts(fresh_db) @@ -131,11 +132,12 @@ def add_test_weather_sensor_and_forecasts(db: SQLAlchemy): for dt, val in zip(time_slots, values): db.session.add( Weather( - sensor_id=sensor.id, - datetime=as_server_time(dt), - value=val, - horizon=timedelta(hours=6), - data_source_id=data_source.id, + use_legacy_kwargs=False, + sensor=sensor.corresponding_sensor, + event_start=as_server_time(dt), + event_value=val, + belief_horizon=timedelta(hours=6), + source=data_source, ) ) diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 2f70a5c8b..0605fa3e0 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -222,13 +222,16 @@ def test_simplify_index(setup_test_data, check_empty_frame): def test_query_beliefs(setup_beliefs): """Check various ways of querying for beliefs.""" sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - source = DataSource.query.filter_by(name="Seita").one_or_none() + source = DataSource.query.filter_by(name="ENTSO-E").one_or_none() bdfs = [ TimedBelief.search(sensor, source=source), TimedBelief.search(sensor.id, source=source), TimedBelief.search(sensor.name, source=source), 
sensor.search_beliefs(source=source), - tb.BeliefsDataFrame(sensor.beliefs), # doesn't allow filtering + tb.BeliefsDataFrame(sensor.beliefs)[ + tb.BeliefsDataFrame(sensor.beliefs).index.get_level_values("source") + == source + ], ] for bdf in bdfs: assert sensor.event_resolution == timedelta(hours=1) @@ -242,7 +245,8 @@ def test_persist_beliefs(setup_beliefs, setup_test_data): We load the already set up beliefs, and form new beliefs an hour later. """ sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor) + source = DataSource.query.filter_by(name="ENTSO-E").one_or_none() + bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor, source=source) # Form new beliefs df = bdf.reset_index() @@ -253,5 +257,5 @@ def test_persist_beliefs(setup_beliefs, setup_test_data): ) TimedBelief.add(bdf) - bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor) + bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor, source=source) assert len(bdf) == setup_beliefs * 2 diff --git a/flexmeasures/data/tests/test_time_series_services.py b/flexmeasures/data/tests/test_time_series_services.py index 32a36f144..5125e7c48 100644 --- a/flexmeasures/data/tests/test_time_series_services.py +++ b/flexmeasures/data/tests/test_time_series_services.py @@ -68,7 +68,7 @@ def test_do_not_drop_changed_probabilistic_belief(setup_beliefs): # Set a reference for the number of beliefs stored sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - bdf = sensor.search_beliefs(source="Seita") + bdf = sensor.search_beliefs(source="ENTSO-E") num_beliefs_before = len(bdf) # See what happens when storing a belief with more certainty one hour later @@ -91,6 +91,6 @@ def test_do_not_drop_changed_probabilistic_belief(setup_beliefs): save_to_db(new_belief) # Verify that the whole probabilistic belief was added - bdf = sensor.search_beliefs(source="Seita") + bdf = sensor.search_beliefs(source="ENTSO-E") num_beliefs_after = len(bdf) assert num_beliefs_after == num_beliefs_before + len(new_belief) From dc9ecfa6c2fdbe503aa4d699011b3eb2605ca2ad Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 27 Dec 2021 13:52:15 +0100 Subject: [PATCH 21/46] Issue 282 use pint to check and convert units (#283) Prepare for future user functionality to auto-convert data to desired units when POSTing and GETting data, and hook up unit conversion to our dev endpoint for posting sensor data. * Use pint for unit_utils, using h to denote hour, adding the world's currencies, and simplifying units according to preference; also add tests Signed-off-by: F.N. Claessen * Add new dependencies to requirements Signed-off-by: F.N. Claessen * Fix docstring: cubic unit with unicode Signed-off-by: F.N. Claessen * Fix docstring examples Signed-off-by: F.N. Claessen * Convert units and data in SensorDataSchema Signed-off-by: F.N. Claessen * Fix case of simple multiplier Signed-off-by: F.N. Claessen * Test more unit conversions Signed-off-by: F.N. Claessen * Refactor unit utils Signed-off-by: F.N. Claessen * Fix converting offset units (such as degrees Celsius) Signed-off-by: F.N. Claessen * Extra testing of unit util functions, incl. conversion of offset units Signed-off-by: F.N. Claessen * Add missing docstring Signed-off-by: F.N. Claessen * Black Signed-off-by: F.N. Claessen * Add inline note Signed-off-by: F.N. Claessen * Clarify what test util function does Signed-off-by: F.N. Claessen * Add missing type annotation Signed-off-by: F.N. 
Claessen --- .../api/common/schemas/sensor_data.py | 39 +++++- .../api/dev/tests/test_sensor_data.py | 6 +- .../dev/tests/test_sensor_data_fresh_db.py | 31 +++-- flexmeasures/api/dev/tests/utils.py | 9 +- flexmeasures/utils/tests/test_unit_utils.py | 112 ++++++++++++++++ flexmeasures/utils/unit_utils.py | 126 +++++++++++++++--- requirements/app.in | 2 + requirements/app.txt | 24 ++-- 8 files changed, 301 insertions(+), 48 deletions(-) create mode 100644 flexmeasures/utils/tests/test_unit_utils.py diff --git a/flexmeasures/api/common/schemas/sensor_data.py b/flexmeasures/api/common/schemas/sensor_data.py index 782dc2640..803590181 100644 --- a/flexmeasures/api/common/schemas/sensor_data.py +++ b/flexmeasures/api/common/schemas/sensor_data.py @@ -13,6 +13,10 @@ from flexmeasures.api.common.schemas.sensors import SensorField from flexmeasures.api.common.utils.api_utils import upsample_values from flexmeasures.data.schemas.times import AwareDateTimeField, DurationField +from flexmeasures.utils.unit_utils import ( + determine_unit_conversion_multiplier, + units_are_convertible, +) class SingleValueField(fields.Float): @@ -81,12 +85,21 @@ def check_user_rights_against_sensor(self, data, **kwargs): @validates_schema def check_schema_unit_against_sensor_unit(self, data, **kwargs): - # TODO: technically, there are compatible units, like kWh and kW. - # They could be allowed here, and the SensorDataSchema could - # even convert values to the sensor's unit if possible. - if data["unit"] != data["sensor"].unit: + """Allows units compatible with that of the sensor. + For example, a sensor with W units allows data to be posted with units: + - W, kW, MW, etc. (i.e. units with different prefixes) + - J/s, Nm/s, etc. (i.e. units that can be converted using some multiplier) + - Wh, kWh, etc. (i.e. units that represent a stock delta, which knowing the duration can be converted to a flow) + For compatible units, the SensorDataSchema converts values to the sensor's unit. + """ + posted_unit = data["unit"] + required_unit = data["sensor"].unit + + if posted_unit != required_unit and not units_are_convertible( + posted_unit, required_unit + ): raise ValidationError( - f"Required unit for this sensor is {data['sensor'].unit}, got: {data['unit']}" + f"Required unit for this sensor is {data['sensor'].unit}, got incompatible unit: {data['unit']}" ) @@ -120,6 +133,22 @@ def check_resolution_compatibility_of_values(self, data, **kwargs): f"Resolution of {inferred_resolution} is incompatible with the sensor's required resolution of {required_resolution}." ) + @post_load() + def possibly_convert_units(self, data, **kwargs): + """ + Convert values if needed, to fit the sensor's unit. + Marshmallow runs this after validation. 
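+        For example (illustrative numbers): values posted in W to a kW sensor
+        are multiplied by 0.001, and values posted in kWh to a kW sensor with
+        a 15-minute event resolution are multiplied by 4.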
+ """ + posted_unit = data["unit"] + required_unit = data["sensor"].unit + + if posted_unit != required_unit: + multiplier = determine_unit_conversion_multiplier( + posted_unit, required_unit, data["sensor"].event_resolution + ) + data["values"] = [multiplier * value for value in data["values"]] + return data + @post_load() def possibly_upsample_values(self, data, **kwargs): """ diff --git a/flexmeasures/api/dev/tests/test_sensor_data.py b/flexmeasures/api/dev/tests/test_sensor_data.py index 9138b966e..6dcf5c614 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data.py +++ b/flexmeasures/api/dev/tests/test_sensor_data.py @@ -2,7 +2,7 @@ import pytest from flexmeasures.api.tests.utils import get_auth_token -from flexmeasures.api.dev.tests.utils import make_sensor_data_request +from flexmeasures.api.dev.tests.utils import make_sensor_data_request_for_gas_sensor @pytest.mark.parametrize("use_auth", [False, True]) @@ -48,7 +48,7 @@ def test_post_sensor_data_bad_auth(client, setup_api_test_data, use_auth): def test_post_invalid_sensor_data( client, setup_api_test_data, request_field, new_value, error_field, error_text ): - post_data = make_sensor_data_request() + post_data = make_sensor_data_request_for_gas_sensor() post_data[request_field] = new_value # this guy is allowed to post sensorData auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") @@ -64,7 +64,7 @@ def test_post_invalid_sensor_data( def test_post_sensor_data_twice(client, setup_api_test_data): auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") - post_data = make_sensor_data_request() + post_data = make_sensor_data_request_for_gas_sensor() response = client.post( url_for("post_sensor_data"), json=post_data, diff --git a/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py b/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py index e4aec772a..7b78fa493 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py +++ b/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py @@ -3,22 +3,36 @@ from flask import url_for from flexmeasures.api.tests.utils import get_auth_token -from flexmeasures.api.dev.tests.utils import make_sensor_data_request +from flexmeasures.api.dev.tests.utils import make_sensor_data_request_for_gas_sensor from flexmeasures.data.models.time_series import TimedBelief, Sensor @pytest.mark.parametrize( - "num_values, expected_num_values", + "num_values, expected_num_values, unit, expected_value", [ - (6, 6), - (3, 6), # upsample - (1, 6), # upsample single value sent as float rather than list of floats + (6, 6, "m³/h", -11.28), + (6, 6, "m³", 6 * -11.28), # 6 * 10-min intervals per hour + (6, 6, "l/h", -11.28 / 1000), # 1 m³ = 1000 l + (3, 6, "m³/h", -11.28), # upsample to 20-min intervals + ( + 1, + 6, + "m³/h", + -11.28, + ), # upsample to single value for 1-hour interval, sent as float rather than list of floats ], ) def test_post_sensor_data( - client, setup_api_fresh_test_data, num_values, expected_num_values + client, + setup_api_fresh_test_data, + num_values, + expected_num_values, + unit, + expected_value, ): - post_data = make_sensor_data_request(num_values=num_values) + post_data = make_sensor_data_request_for_gas_sensor( + num_values=num_values, unit=unit + ) sensor = Sensor.query.filter(Sensor.name == "some gas sensor").one_or_none() beliefs_before = TimedBelief.query.filter(TimedBelief.sensor_id == sensor.id).all() print(f"BELIEFS BEFORE: {beliefs_before}") @@ -35,4 +49,5 @@ def test_post_sensor_data( beliefs = 
TimedBelief.query.filter(TimedBelief.sensor_id == sensor.id).all() print(f"BELIEFS AFTER: {beliefs}") assert len(beliefs) == expected_num_values - assert beliefs[0].event_value == -11.28 + # check that values are scaled to the sensor unit correctly + assert pytest.approx(beliefs[0].event_value - expected_value) == 0 diff --git a/flexmeasures/api/dev/tests/utils.py b/flexmeasures/api/dev/tests/utils.py index 89ef508d8..8d655dc6a 100644 --- a/flexmeasures/api/dev/tests/utils.py +++ b/flexmeasures/api/dev/tests/utils.py @@ -1,7 +1,12 @@ from flexmeasures.data.models.time_series import Sensor -def make_sensor_data_request(num_values: int = 6, duration: str = "PT1H") -> dict: +def make_sensor_data_request_for_gas_sensor( + num_values: int = 6, duration: str = "PT1H", unit: str = "m³" +) -> dict: + """Creates request to post sensor data for a gas sensor. + This particular gas sensor measures units of m³/h with a 10-minute resolution. + """ sensor = Sensor.query.filter(Sensor.name == "some gas sensor").one_or_none() message: dict = { "type": "PostSensorDataRequest", @@ -9,7 +14,7 @@ def make_sensor_data_request(num_values: int = 6, duration: str = "PT1H") -> dic "values": num_values * [-11.28], "start": "2021-06-07T00:00:00+02:00", "duration": duration, - "unit": "m³/h", + "unit": unit, } if num_values == 1: # flatten [] to diff --git a/flexmeasures/utils/tests/test_unit_utils.py b/flexmeasures/utils/tests/test_unit_utils.py new file mode 100644 index 000000000..bbe9dbfcd --- /dev/null +++ b/flexmeasures/utils/tests/test_unit_utils.py @@ -0,0 +1,112 @@ +from datetime import timedelta +import pytest + +from flexmeasures.utils.unit_utils import ( + determine_flow_unit, + determine_stock_unit, + determine_unit_conversion_multiplier, + units_are_convertible, + is_energy_unit, + is_power_unit, + u, +) + + +@pytest.mark.parametrize( + "unit, time_unit, expected_unit", + [ + ("m³", None, "m³/h"), + ("kWh", None, "kW"), + ("km", "h", "km/h"), + ("m", "s", "km/h"), + ], +) +def test_determine_flow_unit( + unit, + time_unit, + expected_unit, +): + if time_unit is None: + assert determine_flow_unit(unit) == expected_unit + else: + assert determine_flow_unit(unit, time_unit) == expected_unit + + +@pytest.mark.parametrize( + "unit, time_unit, expected_unit", + [ + ("m³/h", None, "m³"), + ("kW", None, "kWh"), + ("m/s", "s", "m"), + ("m/s", "h", "km"), + ], +) +def test_determine_stock_unit( + unit, + time_unit, + expected_unit, +): + if time_unit is None: + assert determine_stock_unit(unit) == expected_unit + else: + assert determine_stock_unit(unit, time_unit) == expected_unit + + +def test_determine_unit_conversion_multiplier(): + assert determine_unit_conversion_multiplier("kW", "W") == 1000 + assert determine_unit_conversion_multiplier("J/s", "W") == 1 + assert determine_unit_conversion_multiplier("Wh", "W", timedelta(minutes=10)) == 6 + assert determine_unit_conversion_multiplier("kWh", "MJ") == 3.6 + assert determine_unit_conversion_multiplier("°C", "K") == 274.15 + + +def test_h_denotes_hour_and_not_planck_constant(): + assert u.Quantity("h").dimensionality == u.Quantity("hour").dimensionality + assert ( + u.Quantity("hbar").dimensionality + == u.Quantity("planck_constant").dimensionality + ) + + +def test_units_are_convertible(): + assert units_are_convertible("kW", "W") # units just have different prefixes + assert units_are_convertible( + "J/s", "W" + ) # units can be converted using some multiplier + assert units_are_convertible( + "Wh", "W" + ) # units that represent a stock delta can, knowing the 
duration, be converted to a flow + assert units_are_convertible("toe", "W") # tonne of oil equivalent + assert units_are_convertible("°C", "K") # offset unit to absolute unit + assert not units_are_convertible("°C", "W") + assert not units_are_convertible("EUR/MWh", "W") + + +@pytest.mark.parametrize( + "unit, power_unit", + [ + ("EUR/MWh", False), + ("KRW/kWh", False), + ("kWh", False), + ("kW", True), + ("watt", True), + ("°C", False), + ], +) +def test_is_power_unit(unit: str, power_unit: bool): + assert is_power_unit(unit) is power_unit + + +@pytest.mark.parametrize( + "unit, energy_unit", + [ + ("EUR/MWh", False), + ("KRW/kWh", False), + ("kWh", True), + ("kW", False), + ("watthour", True), + ("°C", False), + ], +) +def test_is_energy_unit(unit: str, energy_unit: bool): + assert is_energy_unit(unit) is energy_unit diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index 48c4523e3..390249996 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -1,25 +1,113 @@ +from datetime import timedelta +from typing import Optional + +from moneyed import list_all_currencies +import importlib.resources as pkg_resources +import pint + +# Edit constants template to stop using h to represent planck_constant +constants_template = ( + pkg_resources.read_text(pint, "constants_en.txt") + .replace("= h ", " ") + .replace(" h ", " planck_constant ") +) + +# Edit units template to use h to represent hour instead of planck_constant +units_template = ( + pkg_resources.read_text(pint, "default_en.txt") + .replace("@import constants_en.txt", "") + .replace(" h ", " planck_constant ") + .replace("hour = 60 * minute = hr", "hour = 60 * minute = h = hr") +) + +# Create custom template +custom_template = [f"{c} = [currency_{c}]" for c in list_all_currencies()] + +# Join templates as iterable object +full_template = ( + constants_template.split("\n") + units_template.split("\n") + custom_template +) + +# Set up UnitRegistry with abbreviated scientific format +u = pint.UnitRegistry(full_template) +u.default_format = "~P" # short pretty + + +PREFERRED_UNITS = [ + "m", + "h", + "kg", + "m/h", + "W", + "N", + "Wh", + "m**2", + "m**3", + "V", + "A", + "dimensionless", +] # todo: move to config setting, with these as a default (NB prefixes do not matter here, this is about SI base units, so km/h is equivalent to m/h) +PREFERRED_UNITS_DICT = dict([(u[x].dimensionality, x) for x in PREFERRED_UNITS]) + + +def to_preferred(x: pint.Quantity) -> pint.Quantity: + """From https://github.com/hgrecco/pint/issues/676#issuecomment-689157693""" + dim = x.dimensionality + if dim in PREFERRED_UNITS_DICT: + return x.to(PREFERRED_UNITS_DICT[dim]).to_compact() + return x + + +def determine_unit_conversion_multiplier( + from_unit: str, to_unit: str, duration: Optional[timedelta] = None +): + """Determine the value multiplier for a given unit conversion. + If needed, requires a duration to convert from units of stock change to units of flow. + """ + scalar = u.Quantity(from_unit).to_base_units() / u.Quantity(to_unit).to_base_units() + if scalar.dimensionality == u.Quantity("h").dimensionality: + if duration is None: + raise ValueError( + f"Cannot convert units from {from_unit} to {to_unit} without known duration." 
+ ) + return scalar.to_timedelta() / duration + return scalar.to_reduced_units().magnitude + + def determine_flow_unit(stock_unit: str, time_unit: str = "h"): """For example: - >>> determine_flow_unit("m3") # m3/h + >>> determine_flow_unit("m³") # m³/h >>> determine_flow_unit("kWh") # kW """ - return ( - stock_unit.rpartition(time_unit)[0] - if stock_unit.endswith(time_unit) - else f"{stock_unit}/{time_unit}" - ) + flow = to_preferred(u.Quantity(stock_unit) / u.Quantity(time_unit)) + return "{:~P}".format(flow.units) def determine_stock_unit(flow_unit: str, time_unit: str = "h"): """For example: - >>> determine_stock_unit("m3/h") # m3 + >>> determine_stock_unit("m³/h") # m³ >>> determine_stock_unit("kW") # kWh """ - return ( - flow_unit.rpartition(f"/{time_unit}")[0] - if flow_unit.endswith(f"/{time_unit}") - else f"{flow_unit}{time_unit}" - ) + stock = to_preferred(u.Quantity(flow_unit) * u.Quantity(time_unit)) + return "{:~P}".format(stock.units) + + +def units_are_convertible( + from_unit: str, to_unit: str, duration_known: bool = True +) -> bool: + """For example, a sensor with W units allows data to be posted with units: + >>> units_are_convertible("kW", "W") # True (units just have different prefixes) + >>> units_are_convertible("J/s", "W") # True (units can be converted using some multiplier) + >>> units_are_convertible("Wh", "W") # True (units that represent a stock delta can, knowing the duration, be converted to a flow) + >>> units_are_convertible("°C", "W") # False + """ + scalar = u.Quantity(from_unit).to_base_units() / u.Quantity(to_unit).to_base_units() + if duration_known: + return scalar.dimensionality in ( + u.Quantity("h").dimensionality, + u.Quantity("dimensionless").dimensionality, + ) + return scalar.dimensionality == u.Quantity("dimensionless").dimensionality def is_power_unit(unit: str) -> bool: @@ -27,16 +115,16 @@ def is_power_unit(unit: str) -> bool: >>> is_power_unit("kW") # True >>> is_power_unit("°C") # False >>> is_power_unit("kWh") # False - >>> is_power_unit("EUR/kWh") # False + >>> is_power_unit("EUR/MWh") # False """ - return unit in ("W", "kW", "MW") + return u.Quantity(unit).dimensionality == u.Quantity("W").dimensionality def is_energy_unit(unit: str) -> bool: """For example: - >>> is_power_unit("kW") # False - >>> is_power_unit("°C") # False - >>> is_power_unit("kWh") # True - >>> is_power_unit("EUR/kWh") # False + >>> is_energy_unit("kW") # False + >>> is_energy_unit("°C") # False + >>> is_energy_unit("kWh") # True + >>> is_energy_unit("EUR/MWh") # False """ - return unit in ("Wh", "kWh", "MWh") + return u.Quantity(unit).dimensionality == u.Quantity("Wh").dimensionality diff --git a/requirements/app.in b/requirements/app.in index 8b791b450..e599a9a94 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -6,6 +6,8 @@ pscript pandas # pandas-bokeh 0.5 requires bokeh>=2.0, but bokeh still doesn't support sharing a legend across plots pandas-bokeh==0.4.3 +pint +py-moneyed iso8601 xlrd inflection diff --git a/requirements/app.txt b/requirements/app.txt index 3b2eaa204..932205054 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -21,10 +21,8 @@ attrs==21.2.0 # jsonschema # outcome # trio -backports.zoneinfo==0.2.1 - # via - # pytz-deprecation-shim - # tzlocal +babel==2.9.1 + # via py-moneyed bcrypt==3.2.0 # via -r requirements/app.in beautifulsoup4==4.10.0 @@ -140,10 +138,7 @@ idna==3.3 importlib-metadata==4.8.1 # via # -r requirements/app.in - # alembic # timely-beliefs -importlib-resources==5.4.0 - # via alembic inflect==5.3.0 # via 
-r requirements/app.in inflection==0.5.1 @@ -220,7 +215,9 @@ openturns==1.17 outcome==1.1.0 # via trio packaging==21.2 - # via bokeh + # via + # bokeh + # pint pandas==1.2.5 # via # -r requirements/app.in @@ -241,6 +238,8 @@ pillow==8.4.0 # via # bokeh # matplotlib +pint==0.18 + # via -r requirements/app.in ply==3.11 # via pyomo properscoring==0.1 @@ -255,6 +254,8 @@ psycopg2-binary==2.9.1 # timely-beliefs pvlib==0.9.0 # via -r requirements/app.in +py-moneyed==2.0 + # via -r requirements/app.in pycparser==2.20 # via cffi pyomo==6.1.2 @@ -279,6 +280,7 @@ python-dotenv==0.19.1 pytz==2021.3 # via # -r requirements/app.in + # babel # pandas # pvlib # timely-beliefs @@ -370,6 +372,8 @@ trio==0.19.0 # trio-websocket trio-websocket==0.9.2 # via selenium +typing-extensions==4.0.1 + # via py-moneyed tzdata==2021.5 # via pytz-deprecation-shim tzlocal==4.1 @@ -390,9 +394,7 @@ wtforms==2.3.3 xlrd==2.0.1 # via -r requirements/app.in zipp==3.6.0 - # via - # importlib-metadata - # importlib-resources + # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools From f651eea64e77439a5402195a31f808f027de9f27 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 29 Dec 2021 21:03:36 +0100 Subject: [PATCH 22/46] Sub issue 284b query timed belief rather than power/price/weather (#287) Move time series queries to the new data model, and also have forecasting and scheduling jobs directly create TimedBeliefs. * Query TimedBelief rather than Power in api v1.3 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1.3 implementations Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in user services tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in query tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in forecasting tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in scheduling tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 tests Signed-off-by: F.N. Claessen * Simplify data deletion, like, by a lot Signed-off-by: F.N. Claessen * Count ex-ante TimedBeliefs after populating time series forecasts Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price in api v1_1 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in Resource.load_sensor_data Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in api v2.0 tests Signed-off-by: F.N. Claessen * Refactor: simplify duplicate query construction Signed-off-by: F.N. Claessen * Add custom join target to get rid of SA warning Signed-off-by: F.N. Claessen * Filter criteria should work for both TimedBeliefs and TimedValues Signed-off-by: F.N. Claessen * Clarify docstring Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 implementations Signed-off-by: F.N. Claessen * Schedules should contain one deterministic belief per event Signed-off-by: F.N. Claessen * Fix type annotation Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price/Weather for analytics Signed-off-by: F.N. Claessen * Query deterministic TimedBelief rather than Price for planning queries Signed-off-by: F.N. Claessen * Forecast TimedBelief rather than Power/Price/Weather Signed-off-by: F.N. Claessen * Schedule TimedBelief rather than Power Signed-off-by: F.N. 
Claessen * Apparently, to initialize a TimedBelief is to save a TimedBelief, too Signed-off-by: F.N. Claessen * Create TimedBelief rather than Power/Price/Weather in data generation script Signed-off-by: F.N. Claessen * Bump timely-beliefs dependency Signed-off-by: F.N. Claessen * Fix latest state query Signed-off-by: F.N. Claessen * Revert "Apparently, to initialize a TimedBelief is to save a TimedBelief, too" This reverts commit fb58ec7459dd69c571bee27cdce61e67c14617ae. * Prevent saving TimedBelief to session upon updating Sensor or Source Signed-off-by: F.N. Claessen * Expand inline note Signed-off-by: F.N. Claessen * Refactor: simplify block in data_gen.py Signed-off-by: F.N. Claessen * Add back test and fix it by selecting only the most recent beliefs when resampling, and by solving a bug in a deprecated optional argument. Also, remove redundant index conversions in test, and move one conversion closer to where it is needed Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/validators.py | 2 +- flexmeasures/api/v1/implementations.py | 9 +- flexmeasures/api/v1/tests/test_api_v1.py | 18 +- flexmeasures/api/v1/tests/utils.py | 24 +- flexmeasures/api/v1_1/implementations.py | 2 - flexmeasures/api/v1_1/tests/test_api_v1_1.py | 9 +- .../api/v1_1/tests/test_api_v1_1_fresh_db.py | 2 +- flexmeasures/api/v1_1/tests/utils.py | 32 ++- flexmeasures/api/v1_3/implementations.py | 50 +--- flexmeasures/api/v1_3/tests/test_api_v1_3.py | 11 +- .../api/v2_0/implementations/sensors.py | 3 - .../tests/test_api_v2_0_sensors_fresh_db.py | 2 - flexmeasures/api/v2_0/tests/utils.py | 61 ++--- flexmeasures/cli/data_add.py | 13 - flexmeasures/cli/data_delete.py | 56 +--- flexmeasures/cli/testing.py | 2 - .../models/forecasting/model_spec_factory.py | 18 +- flexmeasures/data/models/forecasting/utils.py | 10 +- flexmeasures/data/models/planning/utils.py | 7 +- flexmeasures/data/models/time_series.py | 41 ++- flexmeasures/data/models/utils.py | 18 -- flexmeasures/data/queries/analytics.py | 15 +- flexmeasures/data/queries/utils.py | 10 +- flexmeasures/data/scripts/data_gen.py | 257 +++--------------- flexmeasures/data/services/forecasting.py | 38 +-- flexmeasures/data/services/resources.py | 6 +- flexmeasures/data/services/scheduling.py | 27 +- .../data/tests/test_forecasting_jobs.py | 42 ++- .../tests/test_forecasting_jobs_fresh_db.py | 32 +-- flexmeasures/data/tests/test_queries.py | 31 ++- .../data/tests/test_scheduling_jobs.py | 9 +- .../tests/test_scheduling_jobs_fresh_db.py | 11 +- flexmeasures/data/tests/test_user_services.py | 11 +- flexmeasures/ui/charts/latest_state.py | 11 +- requirements/app.in | 2 +- requirements/app.txt | 2 +- 36 files changed, 296 insertions(+), 598 deletions(-) delete mode 100644 flexmeasures/data/models/utils.py diff --git a/flexmeasures/api/common/utils/validators.py b/flexmeasures/api/common/utils/validators.py index 25bfb825a..a45ff673c 100644 --- a/flexmeasures/api/common/utils/validators.py +++ b/flexmeasures/api/common/utils/validators.py @@ -271,7 +271,7 @@ def get_meter_data(user_source_ids): } The source ids then include the user's own id, - and ids of other users that are registered as a Prosumer and/or Energy Service Company. + and ids of other users whose organisation account is registered as a Prosumer and/or Energy Service Company. 
""" def wrapper(fn): diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 1bbc3de14..2d41991bc 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -13,7 +13,7 @@ ) from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.api.common.responses import ( @@ -199,8 +199,8 @@ def collect_connection_and_value_groups( # Get the power values # TODO: fill NaN for non-existing values - power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Power.search( - old_sensor_names=sensor_names, + power_bdf_dict: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( + sensor_names, event_starts_after=start, event_ends_before=end, resolution=resolution, @@ -210,6 +210,8 @@ def collect_connection_and_value_groups( beliefs_before=belief_time_window[1], user_source_ids=user_source_ids, source_types=source_types, + most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, sum_multiple=False, ) # Todo: parse time window of power_bdf_dict, which will be different for requests that are not of the form: @@ -317,7 +319,6 @@ def create_connection_and_value_groups( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v1/tests/test_api_v1.py b/flexmeasures/api/v1/tests/test_api_v1.py index 875a3f3d9..38e57baf3 100644 --- a/flexmeasures/api/v1/tests/test_api_v1.py +++ b/flexmeasures/api/v1/tests/test_api_v1.py @@ -199,35 +199,33 @@ def test_get_meter_data(db, app, client, message): [ pd.DataFrame.from_dict( dict( - value=[(100.0 + i) for i in range(6)], - datetime=[ + event_value=[(100.0 + i) for i in range(6)], + event_start=[ isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i) for i in range(6) ], - data_source_id=1, + source_id=1, ) ), pd.DataFrame.from_dict( dict( - value=[(1000.0 - 10 * i) for i in range(6)], - datetime=[ + event_value=[(1000.0 - 10 * i) for i in range(6)], + event_start=[ isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i) for i in range(6) ], - data_source_id=2, + source_id=2, ) ), ] ) if "source" in message: source_ids = validate_user_sources(message["source"]) - expected_values = expected_values[ - expected_values["data_source_id"].isin(source_ids) - ] + expected_values = expected_values[expected_values["source_id"].isin(source_ids)] expected_values = expected_values.set_index( - ["datetime", "data_source_id"] + ["event_start", "source_id"] ).sort_index() # check whether conftest.py did its job setting up the database with expected values diff --git a/flexmeasures/api/v1/tests/utils.py b/flexmeasures/api/v1/tests/utils.py index 10f87e529..a9ef31e91 100644 --- a/flexmeasures/api/v1/tests/utils.py +++ b/flexmeasures/api/v1/tests/utils.py @@ -7,8 +7,7 @@ import pandas as pd from flexmeasures.api.common.utils.validators import validate_user_sources -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief def 
message_for_get_meter_data( @@ -119,21 +118,26 @@ def verify_power_in_db( ) resolution = sensor.event_resolution query = ( - db.session.query(Power.datetime, Power.value, Power.data_source_id) - .filter((Power.datetime > start - resolution) & (Power.datetime < end)) - .filter(Power.horizon == horizon) + db.session.query( + TimedBelief.event_start, TimedBelief.event_value, TimedBelief.source_id + ) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) + ) + .filter(TimedBelief.belief_horizon == horizon) .join(Sensor) - .filter(Power.sensor_id == Sensor.id) + .filter(TimedBelief.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) if "source" in message: source_ids = validate_user_sources(message["source"]) - query = query.filter(Power.data_source_id.in_(source_ids)) + query = query.filter(TimedBelief.source_id.in_(source_ids)) df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) - df = df.set_index(["datetime", "data_source_id"]).sort_index() + df = df.set_index(["event_start", "source_id"]).sort_index() if swapped_sign: - df["value"] = -df["value"] + df["event_value"] = -df["event_value"] - assert df["value"].to_list() == expected_df["value"].to_list() + assert df["event_value"].to_list() == expected_df["event_value"].to_list() diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index 016bcec45..c3c08c5bd 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -128,7 +128,6 @@ def post_price_data_response( if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( - Price, sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, @@ -216,7 +215,6 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this sensor forecasting_jobs.extend( create_forecasting_jobs( - Weather, sensor.id, start, start + duration, diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1.py b/flexmeasures/api/v1_1/tests/test_api_v1_1.py index 514058311..440b37abc 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1.py @@ -24,7 +24,6 @@ from flexmeasures.auth.error_handling import UNAUTH_ERROR_STATUS from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.user import User -from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor @@ -155,7 +154,6 @@ def test_post_price_data(setup_api_test_data, db, app, clean_redis, post_message for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id @@ -195,9 +193,9 @@ def test_post_weather_forecasts( ): """ Try to post wind speed and temperature forecasts as a logged-in test user with the Prosumer role, which should succeed. - As only forecasts are sent, no forecasting jobs are expected. + As only forecasts are sent, no additional forecasting jobs are expected. 
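+    (Forecasting jobs are only created for ex-post data, i.e. measurements, not for posted forecasts.)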
""" - assert len(get_forecasting_jobs("Weather")) == 0 + num_jobs_before = len(get_forecasting_jobs()) # post weather data auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") @@ -210,7 +208,8 @@ def test_post_weather_forecasts( assert post_weather_data_response.status_code == 200 assert post_weather_data_response.json["type"] == "PostWeatherDataResponse" - assert len(get_forecasting_jobs("Weather")) == 0 + num_jobs_after = len(get_forecasting_jobs()) + assert num_jobs_after == num_jobs_before @pytest.mark.parametrize( diff --git a/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py b/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py index c99b04434..aee81bf6e 100644 --- a/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py +++ b/flexmeasures/api/v1_1/tests/test_api_v1_1_fresh_db.py @@ -78,7 +78,7 @@ def test_post_weather_data( assert post_weather_data_response.json["type"] == "PostWeatherDataResponse" forecast_horizons = forecast_horizons_for(timedelta(minutes=5)) - jobs = get_forecasting_jobs("Weather") + jobs = get_forecasting_jobs(last_n=len(forecast_horizons)) for job, horizon in zip( sorted(jobs, key=lambda x: x.kwargs["horizon"]), forecast_horizons ): diff --git a/flexmeasures/api/v1_1/tests/utils.py b/flexmeasures/api/v1_1/tests/utils.py index 6e0efe36f..3d4bc0d49 100644 --- a/flexmeasures/api/v1_1/tests/utils.py +++ b/flexmeasures/api/v1_1/tests/utils.py @@ -9,8 +9,7 @@ from flask import current_app from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief def message_for_get_prognosis( @@ -157,24 +156,29 @@ def verify_prices_in_db(post_message, values, db, swapped_sign: bool = False): sensor = SensorField("market", "fm0").deserialize(post_message["market"]) resolution = sensor.event_resolution query = ( - db.session.query(Price.value, Price.horizon) - .filter((Price.datetime > start - resolution) & (Price.datetime < end)) - .filter(Price.horizon == horizon - (end - (Price.datetime + resolution))) + db.session.query(TimedBelief.event_value, TimedBelief.belief_horizon) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) + ) + .filter( + TimedBelief.belief_horizon + == horizon - (end - (TimedBelief.event_start + resolution)) + ) .join(Sensor) - .filter(Price.sensor_id == Sensor.id) + .filter(TimedBelief.sensor_id == Sensor.id) .filter(Sensor.name == sensor.name) ) df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) if swapped_sign: - df["value"] = -df["value"] - assert df.value.tolist() == values + df["event_value"] = -df["event_value"] + assert df["event_value"].tolist() == values -def get_forecasting_jobs(timed_value_type: str) -> List[Job]: - return [ - job - for job in current_app.queues["forecasting"].jobs - if job.kwargs["timed_value_type"] == timed_value_type - ] +def get_forecasting_jobs(last_n: Optional[int] = None) -> List[Job]: + """Get all or last n forecasting jobs.""" + if last_n: + return current_app.queues["forecasting"].jobs[-last_n:] + return current_app.queues["forecasting"].jobs diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 2ecd638de..53fe333cf 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -8,7 +8,6 @@ import numpy as np import pandas as pd from rq.job 
import Job, NoSuchJobError -from sqlalchemy import and_, func from flexmeasures.utils.entity_address_utils import ( parse_entity_address, @@ -39,9 +38,9 @@ parse_isodate_str, ) from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.queries.utils import simplify_index from flexmeasures.data.services.resources import has_assets, can_access_asset from flexmeasures.data.services.scheduling import create_scheduling_job @@ -152,46 +151,15 @@ def get_device_message_response(generic_asset_name_groups, duration): message + f'no data is known from "{schedule_data_source_name}".' ) - # todo: after moving the Asset's Power data to the corresponding Sensor's TimedBeliefs, - # the query below should be replaced by: - # sensor.search_beliefs( - # event_starts_after=schedule_start, - # event_ends_before=schedule_start + planning_horizon, - # source=scheduler_source, - # most_recent_beliefs_only=True, - # ) - - # Subquery to get the most recent schedule only - subq = ( - db.session.query( - Power.datetime, - Power.data_source_id, - func.min(Power.horizon).label("most_recent_belief_horizon"), - ) - .filter(Power.sensor_id == sensor_id) - .group_by(Power.datetime, Power.data_source_id) - .subquery() - ) - power_values = ( - Power.query.filter(Power.sensor_id == sensor_id) - .filter(Power.data_source_id == scheduler_source.id) - .filter(Power.datetime >= schedule_start) - .filter(Power.datetime < schedule_start + planning_horizon) - .order_by(Power.datetime.asc()) - .join( - subq, - and_( - Power.datetime == subq.c.datetime, - Power.data_source_id == subq.c.data_source_id, - Power.horizon == subq.c.most_recent_belief_horizon, - ), - ) - .all() + power_values = sensor.search_beliefs( + event_starts_after=schedule_start, + event_ends_before=schedule_start + planning_horizon, + source=scheduler_source, + most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, ) - consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values]), - ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative + # For consumption schedules, positive values denote consumption. For the db, consumption is negative + consumption_schedule = -simplify_index(power_values)["event_value"] if consumption_schedule.empty: return unknown_schedule( message + "the schedule was not found in the database." 
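For reference, a minimal pandas sketch (with synthetic data, not actual
FlexMeasures objects) of what selecting the most recent deterministic belief
per event, as the new search_beliefs call above does, boils down to:

    import pandas as pd

    df = pd.DataFrame(
        {
            "event_start": pd.to_datetime(
                ["2015-01-01T00:00", "2015-01-01T00:00", "2015-01-01T00:15"]
            ),
            "belief_horizon": pd.to_timedelta(["1h", "2h", "1h"]),
            "event_value": [-1.0, -0.9, -1.2],
        }
    )
    # Per event, keep only the most recent belief (smallest belief horizon).
    most_recent = df.loc[df.groupby("event_start")["belief_horizon"].idxmin()]
    # For consumption schedules, flip the sign: in the db, consumption is negative.
    consumption_schedule = -most_recent.set_index("event_start")["event_value"]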
diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index bdf2c7abd..33f508610 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -12,9 +12,8 @@ message_for_get_device_message, message_for_post_udi_event, ) -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.scheduling import handle_scheduling_exception from flexmeasures.utils.calculations import integrate_time_series @@ -97,13 +96,13 @@ def test_post_udi_event_and_get_device_message( scheduler_source is not None ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == sensor.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values], freq=resolution), + [-v.event_value for v in power_values], + index=pd.DatetimeIndex([v.event_start for v in power_values], freq=resolution), ) # For consumption schedules, positive values denote consumption. For the db, consumption is negative assert ( len(consumption_schedule) diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index 0264da331..ee6ce40a7 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -117,7 +117,6 @@ def post_price_data_response( # noqa C901 if current_app.config.get("FLEXMEASURES_MODE", "") != "play": # Forecast 24 and 48 hours ahead for at most the last 24 hours of posted price data forecasting_jobs = create_forecasting_jobs( - Price, sensor.id, max(start, start + duration - timedelta(hours=24)), start + duration, @@ -208,7 +207,6 @@ def post_weather_data_response( # noqa: C901 ): # Todo: replace 0 hours with whatever the moment of switching from ex-ante to ex-post is for this generic asset forecasting_jobs.extend( create_forecasting_jobs( - Weather, sensor.id, start, start + duration, @@ -373,7 +371,6 @@ def post_power_data( if create_forecasting_jobs_too: forecasting_jobs.extend( create_forecasting_jobs( - Power, sensor_id, start, start + duration, diff --git a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py index 2b9a24e68..0436693c9 100644 --- a/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py +++ b/flexmeasures/api/v2_0/tests/test_api_v2_0_sensors_fresh_db.py @@ -10,7 +10,6 @@ message_for_post_price_data, verify_sensor_data_in_db, ) -from flexmeasures.data.models.markets import Price @pytest.mark.parametrize( @@ -60,5 +59,4 @@ def test_post_price_data_2_0( for job, horizon in zip(jobs, horizons): assert job.kwargs["horizon"] == horizon assert job.kwargs["start"] == parse_date(post_message["start"]) + horizon - assert job.kwargs["timed_value_type"] == Price assert job.kwargs["old_sensor_id"] == market.id diff --git a/flexmeasures/api/v2_0/tests/utils.py b/flexmeasures/api/v2_0/tests/utils.py index b0608d509..9d51bd748 100644 --- a/flexmeasures/api/v2_0/tests/utils.py +++ 
b/flexmeasures/api/v2_0/tests/utils.py @@ -6,10 +6,7 @@ import timely_beliefs as tb from flexmeasures.api.common.schemas.sensors import SensorField -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.models.weather import Weather from flexmeasures.data.services.users import find_user_by_email from flexmeasures.api.v1_1.tests.utils import ( message_for_post_price_data as v1_1_message_for_post_price_data, @@ -78,59 +75,33 @@ def verify_sensor_data_in_db( swapped_sign: bool = False, ): """util method to verify that sensor data ended up in the database""" - if entity_type == "sensor": - data_type = TimedBelief - elif entity_type == "connection": - data_type = Power - elif entity_type == "market": - data_type = Price - elif entity_type == "weather_sensor": - data_type = Weather - else: - raise ValueError("Unknown entity type") - start = parse_datetime(post_message["start"]) end = start + parse_duration(post_message["duration"]) sensor: Sensor = SensorField(entity_type, fm_scheme).deserialize( post_message[entity_type] ) resolution = sensor.event_resolution - if "horizon" in post_message: - horizon = parse_duration(post_message["horizon"]) - query = ( - db.session.query(data_type.datetime, data_type.value, data_type.horizon) - .filter( - (data_type.datetime > start - resolution) & (data_type.datetime < end) - ) - .filter(data_type.horizon == horizon) - .join(Sensor) - .filter(Sensor.name == sensor.name) + query = ( + db.session.query( + TimedBelief.event_start, + TimedBelief.event_value, + TimedBelief.belief_horizon, ) - else: - query = ( - db.session.query( - data_type.datetime, - data_type.value, - data_type.horizon, - ) - .filter( - (data_type.datetime > start - resolution) & (data_type.datetime < end) - ) - # .filter(data_type.horizon == (data_type.datetime + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function - .join(Sensor) - .filter(Sensor.name == sensor.name) + .filter( + (TimedBelief.event_start > start - resolution) + & (TimedBelief.event_start < end) ) - # todo: after basing Price on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly + # .filter(TimedBelief.belief_horizon == (TimedBelief.event_start + resolution) - prior) # only for sensors with 0-hour ex_post knowledge horizon function + .join(Sensor) + .filter(Sensor.name == sensor.name) + ) + if "horizon" in post_message: + horizon = parse_duration(post_message["horizon"]) + query = query.filter(TimedBelief.belief_horizon == horizon) + # todo: after basing sensor data on TimedBelief, we should be able to get a BeliefsDataFrame from the query directly df = pd.DataFrame( query.all(), columns=[col["name"] for col in query.column_descriptions] ) - df = df.rename( - columns={ - "value": "event_value", - "datetime": "event_start", - "horizon": "belief_horizon", - } - ) bdf = tb.BeliefsDataFrame(df, sensor=sensor, source="Some source") if "prior" in post_message: prior = parse_datetime(post_message["prior"]) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 7202abfa3..1ae5a0560 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -16,9 +16,6 @@ from flexmeasures.data import db from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.data.services.users import create_user -from flexmeasures.data.models.assets import Power -from 
flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.user import Account, AccountRole, RolesAccounts from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.schemas.sensors import SensorSchema @@ -609,21 +606,11 @@ def create_forecasts( event_resolution = None if as_job: - if asset_type == "Asset": - value_type = Power - elif asset_type == "Market": - value_type = Price - elif asset_type == "WeatherSensor": - value_type = Weather - else: - raise TypeError(f"Unknown asset_type {asset_type}") - for horizon in horizons: # Note that this time period refers to the period of events we are forecasting, while in create_forecasting_jobs # the time period refers to the period of belief_times, therefore we are subtracting the horizon. create_forecasting_jobs( old_sensor_id=asset_id, - timed_value_type=value_type, horizons=[horizon], start_of_roll=forecast_start - horizon, end_of_roll=forecast_end - horizon, diff --git a/flexmeasures/cli/data_delete.py b/flexmeasures/cli/data_delete.py index e66e79de3..005c22a07 100644 --- a/flexmeasures/cli/data_delete.py +++ b/flexmeasures/cli/data_delete.py @@ -6,10 +6,7 @@ from flexmeasures.data import db from flexmeasures.data.models.user import Account, AccountRole, RolesAccounts, User -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.generic_assets import GenericAsset -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather from flexmeasures.data.scripts.data_gen import get_affected_classes from flexmeasures.data.services.users import find_user_by_email, delete_user @@ -117,20 +114,9 @@ def delete_user_and_data(email: str, force: bool): def confirm_deletion( structure: bool = False, data: bool = False, - asset_type: Optional[str] = None, is_by_id: bool = False, ): affected_classes = get_affected_classes(structure, data) - if data and asset_type: - if asset_type == "Asset": - affected_classes.remove(Price) - affected_classes.remove(Weather) - elif asset_type == "Market": - affected_classes.remove(Power) - affected_classes.remove(Weather) - elif asset_type == "WeatherSensor": - affected_classes.remove(Power) - affected_classes.remove(Price) prompt = "This deletes all %s entries from %s.\nDo you want to continue?" % ( " and ".join( ", ".join( @@ -168,32 +154,23 @@ def delete_structure(force): @fm_delete_data.command("measurements") @with_appcontext @click.option( - "--asset-type", - help="Depopulate (time series) data for a specific generic asset type only." - "Follow up with Asset, Market or WeatherSensor.", -) -@click.option( - "--asset-id", + "--sensor-id", type=int, - help="Delete (time series) data for a single asset only. Follow up with the asset's ID. " - "We still need --asset-type, as well, so we know where to look this ID up.", + help="Delete (time series) data for a single sensor only. Follow up with the sensor's ID.", ) @click.option( "--force/--no-force", default=False, help="Skip warning about consequences." ) def delete_measurements( force: bool, - asset_type: Optional[str] = None, - asset_id: Optional[int] = None, + sensor_id: Optional[int] = None, ): - """ Delete measurements (with horizon <= 0).""" + """Delete measurements (ex-post beliefs, i.e. 
with belief_horizon <= 0).""" if not force: - confirm_deletion( - data=True, asset_type=asset_type, is_by_id=asset_id is not None - ) + confirm_deletion(data=True, is_by_id=sensor_id is not None) from flexmeasures.data.scripts.data_gen import depopulate_measurements - depopulate_measurements(app.db, asset_type, asset_id) + depopulate_measurements(app.db, sensor_id) @fm_delete_data.command("prognoses") @@ -202,29 +179,20 @@ def delete_measurements( "--force/--no-force", default=False, help="Skip warning about consequences." ) @click.option( - "--asset-type", - help="Depopulate (time series) data for a specific generic asset type only. " - "Follow up with Asset, Market or WeatherSensor.", -) -@click.option( - "--asset-id", + "--sensor-id", type=int, - help="Depopulate (time series) data for a single asset only. Follow up with the asset's ID. " - "Use in combination with --asset-type, so we know where to look this name up.", + help="Delete (time series) data for a single sensor only. Follow up with the sensor's ID. ", ) def delete_prognoses( force: bool, - asset_type: Optional[str] = None, - asset_id: Optional[int] = None, + sensor_id: Optional[int] = None, ): - """Delete forecasts and schedules (forecasts > 0).""" + """Delete forecasts and schedules (ex-ante beliefs, i.e. with belief_horizon > 0).""" if not force: - confirm_deletion( - data=True, asset_type=asset_type, is_by_id=asset_id is not None - ) + confirm_deletion(data=True, is_by_id=sensor_id is not None) from flexmeasures.data.scripts.data_gen import depopulate_prognoses - depopulate_prognoses(app.db, asset_type, asset_id) + depopulate_prognoses(app.db, sensor_id) app.cli.add_command(fm_delete_data) diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index 00ba824ce..de5621788 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -55,7 +55,6 @@ def test_making_forecasts(): create_forecasting_jobs( old_sensor_id=sensor_id, - timed_value_type=Power, horizons=[timedelta(hours=6)], start_of_roll=as_server_time(datetime(2015, 4, 1)), end_of_roll=as_server_time(datetime(2015, 4, 3)), @@ -151,7 +150,6 @@ def test_generic_model( fallback_model_identifier, ) = linear_model_configurator( sensor=sensors[0], - time_series_class=TimedValueType, forecast_start=start, forecast_end=end, forecast_horizon=horizon, diff --git a/flexmeasures/data/models/forecasting/model_spec_factory.py b/flexmeasures/data/models/forecasting/model_spec_factory.py index 8219477a9..74ec0b781 100644 --- a/flexmeasures/data/models/forecasting/model_spec_factory.py +++ b/flexmeasures/data/models/forecasting/model_spec_factory.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Dict, List, Optional, Union from datetime import datetime, timedelta, tzinfo from pprint import pformat import logging @@ -19,10 +19,8 @@ ) import pandas as pd -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.models.weather import WeatherSensor from flexmeasures.data.models.forecasting.utils import ( create_lags, set_training_and_testing_dates, @@ -53,9 +51,9 @@ class TBSeriesSpecs(SeriesSpecs): def __init__( self, - time_series_class, search_params: dict, name: str, + time_series_class: Optional[type] = TimedBelief, search_fnc: str = 
"search", original_tz: Optional[tzinfo] = pytz.utc, # postgres stores naive datetimes feature_transformation: Optional[ReversibleTransformation] = None, @@ -115,7 +113,6 @@ def check_data(self, df: pd.DataFrame): def create_initial_model_specs( # noqa: C901 sensor: Sensor, - time_series_class: Type[Union[Power, Price, Weather]], forecast_start: datetime, # Start of forecast period forecast_end: datetime, # End of forecast period forecast_horizon: timedelta, # Duration between time of forecasting and end time of the event that is forecast @@ -126,6 +123,7 @@ def create_initial_model_specs( # noqa: C901 custom_model_params: Optional[ dict ] = None, # overwrite model params, most useful for tests or experiments + time_series_class: Optional[type] = TimedBelief, ) -> ModelSpecs: """ Generic model specs for all asset types (also for markets and weather sensors) and horizons. @@ -179,7 +177,7 @@ def create_initial_model_specs( # noqa: C901 name=sensor.generic_asset.generic_asset_type.name, time_series_class=time_series_class, search_params=dict( - old_sensor_names=[sensor.name], + sensors=sensor, event_starts_after=query_window[0], event_ends_before=query_window[1], horizons_at_least=None, @@ -295,9 +293,9 @@ def configure_regressors_for_nearest_weather_sensor( regressor_specs.append( TBSeriesSpecs( name=regressor_specs_name, - time_series_class=Weather, + time_series_class=TimedBelief, search_params=dict( - old_sensor_names=[closest_sensor.name], + sensors=closest_sensor, event_starts_after=query_window[0], event_ends_before=query_window[1], horizons_at_least=horizon, diff --git a/flexmeasures/data/models/forecasting/utils.py b/flexmeasures/data/models/forecasting/utils.py index e095ff01e..17273b3aa 100644 --- a/flexmeasures/data/models/forecasting/utils.py +++ b/flexmeasures/data/models/forecasting/utils.py @@ -21,14 +21,14 @@ def check_data_availability( q = old_time_series_data_model.query.join(old_sensor_model.__class__).filter( old_sensor_model.__class__.name == old_sensor_model.name ) - first_value = q.order_by(old_time_series_data_model.datetime.asc()).first() - last_value = q.order_by(old_time_series_data_model.datetime.desc()).first() + first_value = q.order_by(old_time_series_data_model.event_start.asc()).first() + last_value = q.order_by(old_time_series_data_model.event_start.desc()).first() if first_value is None: raise NotEnoughDataException( "No data available at all. Forecasting impossible." 
) - first = as_server_time(first_value.datetime) - last = as_server_time(last_value.datetime) + first = as_server_time(first_value.event_start) + last = as_server_time(last_value.event_start) if query_window[0] < first: suggested_start = forecast_start + (first - query_window[0]) raise NotEnoughDataException( @@ -56,7 +56,7 @@ def create_lags( resolution: timedelta, use_periodicity: bool, ) -> List[timedelta]: - """ List the lags for this asset type, using horizon and resolution information.""" + """List the lags for this asset type, using horizon and resolution information.""" lags = [] # Include a zero lag in case of backwards forecasting diff --git a/flexmeasures/data/models/planning/utils.py b/flexmeasures/data/models/planning/utils.py index 62186c747..8cdf26394 100644 --- a/flexmeasures/data/models/planning/utils.py +++ b/flexmeasures/data/models/planning/utils.py @@ -7,8 +7,7 @@ import numpy as np import timely_beliefs as tb -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.planning.exceptions import ( UnknownMarketException, UnknownPricesException, @@ -82,11 +81,13 @@ def get_prices( # Look for the applicable market sensor sensor = get_market(sensor) - price_bdf: tb.BeliefsDataFrame = Price.search( + price_bdf: tb.BeliefsDataFrame = TimedBelief.search( sensor.name, event_starts_after=query_window[0], event_ends_before=query_window[1], resolution=to_offset(resolution).freqstr, + most_recent_beliefs_only=True, + one_deterministic_belief_per_event=True, ) price_df = simplify_index(price_bdf) nan_prices = price_df.isnull().values diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 83ae79641..c1be68986 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -7,6 +7,7 @@ from sqlalchemy.ext.mutable import MutableDict from sqlalchemy.orm import Query, Session import timely_beliefs as tb +from timely_beliefs.beliefs.probabilistic_utils import get_median_belief import timely_beliefs.utils as tb_utils from flexmeasures.auth.policy import AuthModelMixin @@ -31,7 +32,7 @@ class Sensor(db.Model, tb.SensorDBMixin, AuthModelMixin): - """A sensor measures events. """ + """A sensor measures events.""" attributes = db.Column(MutableDict.as_mutable(db.JSON), nullable=False, default={}) @@ -165,6 +166,7 @@ def latest_state( source=source, most_recent_beliefs_only=True, most_recent_events_only=True, + one_deterministic_belief_per_event=True, ) def search_beliefs( @@ -180,7 +182,8 @@ def search_beliefs( ] = None, most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, - most_recent_only: bool = False, # deprecated + most_recent_only: bool = None, # deprecated + one_deterministic_belief_per_event: bool = False, as_json: bool = False, ) -> Union[tb.BeliefsDataFrame, str]: """Search all beliefs about events for this sensor. 
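A hypothetical call combining the new flag with existing filters (variable
names assumed):

    bdf = sensor.search_beliefs(
        event_starts_after=start,
        event_ends_before=end,
        most_recent_beliefs_only=True,
        one_deterministic_belief_per_event=True,  # a single value per event
    )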
@@ -194,6 +197,7 @@ def search_beliefs( :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + :param one_deterministic_belief_per_event: only return a single value per event (no probabilistic distribution) :param as_json: return beliefs in JSON format (e.g. for use in charts) rather than as BeliefsDataFrame :returns: BeliefsDataFrame or JSON string (if as_json is True) """ @@ -216,6 +220,7 @@ def search_beliefs( source=source, most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, + one_deterministic_belief_per_event=one_deterministic_belief_per_event, ) if as_json: df = bdf.reset_index() @@ -320,8 +325,22 @@ class TimedBelief(db.Model, tb.TimedBeliefDBMixin): def source_id(cls): return db.Column(db.Integer, db.ForeignKey("data_source.id"), primary_key=True) - sensor = db.relationship("Sensor", backref=db.backref("beliefs", lazy=True)) - source = db.relationship("DataSource", backref=db.backref("beliefs", lazy=True)) + sensor = db.relationship( + "Sensor", + backref=db.backref( + "beliefs", + lazy=True, + cascade="merge", # no save-update (i.e. don't auto-save time series data to session upon updating sensor) + ), + ) + source = db.relationship( + "DataSource", + backref=db.backref( + "beliefs", + lazy=True, + cascade="merge", # no save-update (i.e. don't auto-save time series data to session upon updating source) + ), + ) def __init__( self, @@ -352,7 +371,8 @@ def search( exclude_source_types: Optional[List[str]] = None, most_recent_beliefs_only: bool = False, most_recent_events_only: bool = False, - most_recent_only: bool = False, # deprecated + most_recent_only: bool = None, # deprecated + one_deterministic_belief_per_event: bool = False, resolution: Union[str, timedelta] = None, sum_multiple: bool = True, ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: @@ -371,6 +391,7 @@ def search( :param exclude_source_types: Optional list of source type names to exclude specific source types * :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param most_recent_events_only: only return (post knowledge time) beliefs for the most recent event (maximum event start) + :param one_deterministic_belief_per_event: only return a single value per event (no probabilistic distribution) :param resolution: Optional timedelta or pandas freqstr used to resample the results ** :param sum_multiple: if True, sum over multiple sensors; otherwise, return a dictionary with sensor names as key, each holding a BeliefsDataFrame as its value @@ -410,6 +431,7 @@ def search( source_criteria = get_source_criteria( cls, user_source_ids, source_types, exclude_source_types ) + custom_join_targets = [] if parsed_sources else [DataSource] bdf_dict = {} for sensor in sensors: @@ -426,7 +448,16 @@ def search( most_recent_beliefs_only=most_recent_beliefs_only, most_recent_events_only=most_recent_events_only, custom_filter_criteria=source_criteria, + custom_join_targets=custom_join_targets, ) + if one_deterministic_belief_per_event: + # todo: compute median of collective belief instead of median of first belief (update expected test results accordingly) + # todo: move to timely-beliefs: select 
mean/median belief + bdf = ( + bdf.for_each_belief(get_median_belief) + .groupby(level=["event_start", "belief_time"]) + .apply(lambda x: x.head(1)) + ) if resolution is not None: bdf = bdf.resample_events( resolution, keep_only_most_recent_belief=most_recent_beliefs_only diff --git a/flexmeasures/data/models/utils.py b/flexmeasures/data/models/utils.py deleted file mode 100644 index f45040931..000000000 --- a/flexmeasures/data/models/utils.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import Union, Type - -from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.weather import WeatherSensor, Weather - - -def determine_old_time_series_class_by_old_sensor( - old_sensor: Union[Asset, Market, WeatherSensor] -) -> Type[Union[Power, Price, Weather]]: - if isinstance(old_sensor, Asset): - return Power - elif isinstance(old_sensor, Market): - return Price - elif isinstance(old_sensor, WeatherSensor): - return Weather - else: - raise TypeError("Unknown old sensor type.") diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index f61dbfd9c..922290097 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -13,9 +13,8 @@ from flexmeasures.utils import calculations, time_utils from flexmeasures.data.services.resources import Resource, find_closest_sensor from flexmeasures.data.models.assets import Asset, Power -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.weather import Weather, WeatherSensorType +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.models.weather import WeatherSensorType def get_power_data( @@ -176,7 +175,7 @@ def get_prices_data( market_name = "" if market_sensor is None else market_sensor.name # Get price data - price_bdf: tb.BeliefsDataFrame = Price.search( + price_bdf: tb.BeliefsDataFrame = TimedBelief.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -194,7 +193,7 @@ def get_prices_data( metrics["realised_unit_price"] = np.NaN # Get price forecast - price_forecast_bdf: tb.BeliefsDataFrame = Price.search( + price_forecast_bdf: tb.BeliefsDataFrame = TimedBelief.search( [market_name], event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -262,7 +261,7 @@ def get_weather_data( sensor_names = [sensor.name for sensor in closest_sensors] # Get weather data - weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( + weather_bdf_dict: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( sensor_names, event_starts_after=query_window[0], event_ends_before=query_window[1], @@ -279,7 +278,9 @@ def get_weather_data( ) # Get weather forecasts - weather_forecast_bdf_dict: Dict[str, tb.BeliefsDataFrame] = Weather.search( + weather_forecast_bdf_dict: Dict[ + str, tb.BeliefsDataFrame + ] = TimedBelief.search( sensor_names, event_starts_after=query_window[0], event_ends_before=query_window[1], diff --git a/flexmeasures/data/queries/utils.py b/flexmeasures/data/queries/utils.py index f1382c0bd..98de32b35 100644 --- a/flexmeasures/data/queries/utils.py +++ b/flexmeasures/data/queries/utils.py @@ -38,7 +38,7 @@ def create_beliefs_query( def get_source_criteria( - cls: "Type[ts.TimedValue]", + cls: "Union[Type[ts.TimedValue], Type[ts.TimedBelief]]", user_source_ids: Union[int, List[int]], source_types: List[str], 
exclude_source_types: List[str], @@ -58,7 +58,7 @@ def get_source_criteria( def user_source_criterion( - cls: "Type[ts.TimedValue]", + cls: "Union[Type[ts.TimedValue], Type[ts.TimedBelief]]", user_source_ids: Union[int, List[int]], ) -> BinaryExpression: """Criterion to search only through user data from the specified user sources. @@ -79,7 +79,11 @@ def user_source_criterion( ignorable_user_source_ids = [ user_source.id for user_source in ignorable_user_sources ] - return cls.data_source_id.not_in(ignorable_user_source_ids) + + # todo: [legacy] deprecate this if-statement, which is used to support the TimedValue class + if hasattr(cls, "data_source_id"): + return cls.data_source_id.not_in(ignorable_user_source_ids) + return cls.source_id.not_in(ignorable_user_source_ids) def source_type_criterion(source_types: List[str]) -> BinaryExpression: diff --git a/flexmeasures/data/scripts/data_gen.py b/flexmeasures/data/scripts/data_gen.py index a38cbed2a..280ce2bf1 100644 --- a/flexmeasures/data/scripts/data_gen.py +++ b/flexmeasures/data/scripts/data_gen.py @@ -20,14 +20,13 @@ import inflect from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.models.markets import MarketType, Market, Price -from flexmeasures.data.models.assets import AssetType, Asset, Power +from flexmeasures.data.models.markets import MarketType, Market +from flexmeasures.data.models.assets import AssetType, Asset from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather +from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor from flexmeasures.data.models.user import User, Role, RolesUsers from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import NotEnoughDataException -from flexmeasures.data.models.utils import determine_old_time_series_class_by_old_sensor from flexmeasures.utils.time_utils import ensure_local_timezone from flexmeasures.data.transactional import as_transaction @@ -158,11 +157,12 @@ def add_dummy_tou_market(db: SQLAlchemy): unit="EUR/MWh", ) db.session.add(market) - source = DataSource.query.filter(DataSource.name == "Seita").one_or_none() + source = DataSource.query.filter( + DataSource.name == "Seita", DataSource.type == "demo script" + ).one_or_none() for year in range(2015, 2025): db.session.add( - Price( - use_legacy_kwargs=False, + TimedBelief( event_value=50, event_start=datetime(year, 1, 1, tzinfo=pytz.utc), belief_horizon=timedelta(0), @@ -266,9 +266,6 @@ def populate_time_series_forecasts( # noqa: C901 default_model = lookup_model_specs_configurator() model_specs, model_identifier, model_fallback = default_model( sensor=old_sensor.corresponding_sensor, - time_series_class=determine_old_time_series_class_by_old_sensor( - old_sensor - ), forecast_start=forecast_start, forecast_end=forecast_end, forecast_horizon=horizon, @@ -318,43 +315,16 @@ def populate_time_series_forecasts( # noqa: C901 plt.show() """ - beliefs = [] - if isinstance(old_sensor, Asset): - beliefs = [ - Power( - use_legacy_kwargs=False, - event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), - belief_horizon=horizon, - event_value=value, - sensor=old_sensor.corresponding_sensor, - source=data_source, - ) - for dt, value in forecasts.items() - ] - elif isinstance(old_sensor, Market): - beliefs = [ - Price( - use_legacy_kwargs=False, - event_start=ensure_local_timezone(dt, 
tz_name=LOCAL_TIME_ZONE), - belief_horizon=horizon, - event_value=value, - sensor=old_sensor.corresponding_sensor, - source=data_source, - ) - for dt, value in forecasts.items() - ] - elif isinstance(old_sensor, WeatherSensor): - beliefs = [ - Weather( - use_legacy_kwargs=False, - event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), - belief_horizon=horizon, - event_value=value, - sensor=old_sensor.corresponding_sensor, - source=data_source, - ) - for dt, value in forecasts.items() - ] + beliefs = [ + TimedBelief( + event_start=ensure_local_timezone(dt, tz_name=LOCAL_TIME_ZONE), + belief_horizon=horizon, + event_value=value, + sensor=old_sensor.corresponding_sensor, + source=data_source, + ) + for dt, value in forecasts.items() + ] print( "Saving %s %s-forecasts for %s..." @@ -364,16 +334,10 @@ def populate_time_series_forecasts( # noqa: C901 db.session.add(belief) click.echo( - "DB now has %d Power Forecasts" - % db.session.query(Power).filter(Power.horizon > timedelta(hours=0)).count() - ) - click.echo( - "DB now has %d Price Forecasts" - % db.session.query(Price).filter(Price.horizon > timedelta(hours=0)).count() - ) - click.echo( - "DB now has %d Weather Forecasts" - % db.session.query(Weather).filter(Weather.horizon > timedelta(hours=0)).count() + "DB now has %d forecasts" + % db.session.query(TimedBelief) + .filter(TimedBelief.belief_horizon > timedelta(hours=0)) + .count() ) @@ -411,186 +375,45 @@ def depopulate_structure(db: SQLAlchemy): @as_transaction def depopulate_measurements( db: SQLAlchemy, - old_sensor_class_name: Optional[str] = None, - old_sensor_id: Optional[id] = None, + sensor_id: Optional[id] = None, ): - click.echo("Depopulating (time series) data from the database %s ..." % db.engine) - num_prices_deleted = 0 - num_power_measurements_deleted = 0 - num_weather_measurements_deleted = 0 - - # TODO: simplify this when sensors moved to one unified table - - if old_sensor_id is None: - if old_sensor_class_name is None or old_sensor_class_name == "Market": - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon <= timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "Asset": - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon <= timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon <= timedelta(hours=0)) - .delete() - ) - else: - if old_sensor_class_name is None: - click.echo( - "If you specify --asset-name, please also specify --asset-type, so we can look it up." - ) - return - if old_sensor_class_name == "Market": - market = ( - db.session.query(Market) - .filter(Market.id == old_sensor_id) - .one_or_none() - ) - if market is not None: - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon <= timedelta(hours=0)) - .filter(Price.sensor_id == market.id) - .delete() - ) - else: - num_prices_deleted = 0 + click.echo("Deleting (time series) data from the database %s ..." 
% db.engine) - elif old_sensor_class_name == "Asset": - asset = ( - db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() - ) - if asset is not None: - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon <= timedelta(hours=0)) - .filter(Power.sensor_id == asset.id) - .delete() - ) - else: - num_power_measurements_deleted = 0 - - elif old_sensor_class_name == "WeatherSensor": - sensor = ( - db.session.query(WeatherSensor) - .filter(WeatherSensor.id == old_sensor_id) - .one_or_none() - ) - if sensor is not None: - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon <= timedelta(hours=0)) - .filter(Weather.sensor_id == sensor.id) - .delete() - ) - else: - num_weather_measurements_deleted = 0 + query = db.session.query(TimedBelief).filter( + TimedBelief.belief_horizon <= timedelta(hours=0) + ) + if sensor_id is not None: + query = query.filter(TimedBelief.sensor_id == sensor_id) + num_measurements_deleted = query.delete() - click.echo("Deleted %d Prices" % num_prices_deleted) - click.echo("Deleted %d Power Measurements" % num_power_measurements_deleted) - click.echo("Deleted %d Weather Measurements" % num_weather_measurements_deleted) + click.echo("Deleted %d measurements (ex-post beliefs)" % num_measurements_deleted) @as_transaction def depopulate_prognoses( db: SQLAlchemy, - old_sensor_class_name: Optional[str] = None, - old_sensor_id: Optional[id] = None, + sensor_id: Optional[id] = None, ): click.echo( - "Depopulating (time series) forecasts and schedules data from the database %s ..." + "Deleting (time series) forecasts and schedules data from the database %s ..." % db.engine ) - num_prices_deleted = 0 - num_power_measurements_deleted = 0 - num_weather_measurements_deleted = 0 # Clear all jobs num_forecasting_jobs_deleted = app.queues["forecasting"].empty() num_scheduling_jobs_deleted = app.queues["scheduling"].empty() # Clear all forecasts (data with positive horizon) - if old_sensor_id is None: - if old_sensor_class_name is None or old_sensor_class_name == "Market": - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon > timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "Asset": - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon > timedelta(hours=0)) - .delete() - ) - if old_sensor_class_name is None or old_sensor_class_name == "WeatherSensor": - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon > timedelta(hours=0)) - .delete() - ) - else: - click.echo( - "Depopulating (time series) forecasts and schedules for %s from the database %s ..." 
- % (old_sensor_id, db.engine) - ) - - if old_sensor_class_name == "Market": - market = ( - db.session.query(Market) - .filter(Market.id == old_sensor_id) - .one_or_none() - ) - if market is not None: - num_prices_deleted = ( - db.session.query(Price) - .filter(Price.horizon > timedelta(hours=0)) - .filter(Price.sensor_id == market.id) - .delete() - ) - else: - num_prices_deleted = 0 - - if old_sensor_class_name == "Asset": - asset = ( - db.session.query(Asset).filter(Asset.id == old_sensor_id).one_or_none() - ) - if asset is not None: - num_power_measurements_deleted = ( - db.session.query(Power) - .filter(Power.horizon > timedelta(hours=0)) - .filter(Power.sensor_id == asset.id) - .delete() - ) - else: - num_power_measurements_deleted = 0 + query = db.session.query(TimedBelief).filter( + TimedBelief.belief_horizon > timedelta(hours=0) + ) + if sensor_id is not None: + query = query.filter(TimedBelief.sensor_id == sensor_id) + num_forecasts_deleted = query.delete() - if old_sensor_class_name == "WeatherSensor": - sensor = ( - db.session.query(WeatherSensor) - .filter(WeatherSensor.id == old_sensor_id) - .one_or_none() - ) - if sensor is not None: - num_weather_measurements_deleted = ( - db.session.query(Weather) - .filter(Weather.horizon > timedelta(hours=0)) - .filter(Weather.sensor_id == sensor.id) - .delete() - ) - else: - num_weather_measurements_deleted = 0 click.echo("Deleted %d Forecast Jobs" % num_forecasting_jobs_deleted) click.echo("Deleted %d Schedule Jobs" % num_scheduling_jobs_deleted) - click.echo("Deleted %d Price Forecasts" % num_prices_deleted) - click.echo("Deleted %d Power Forecasts" % num_power_measurements_deleted) - click.echo("Deleted %d Weather Forecasts" % num_weather_measurements_deleted) + click.echo("Deleted %d forecasts (ex-ante beliefs)" % num_forecasts_deleted) def reset_db(db: SQLAlchemy): @@ -706,5 +529,5 @@ def get_affected_classes(structure: bool = True, data: bool = False) -> List: DataSource, ] if data: - affected_classes += [TimedBelief, Power, Price, Weather] + affected_classes += [TimedBelief] return affected_classes diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 1502bada1..6e9e9e176 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -1,25 +1,23 @@ from datetime import datetime, timedelta -from typing import List, Type, Union +from typing import List from flask import current_app import click from rq import get_current_job from rq.job import Job -from sqlalchemy.exc import IntegrityError from timetomodel.forecasting import make_rolling_forecasts +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException -from flexmeasures.data.models.markets import Price from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.forecasting.utils import ( get_query_window, check_data_availability, ) -from flexmeasures.data.models.weather import Weather -from flexmeasures.data.utils import save_to_session, get_data_source +from flexmeasures.data.utils import get_data_source from flexmeasures.utils.time_utils import ( as_server_time, server_now, @@ -46,7 +44,6 @@ class MisconfiguredForecastingJobException(Exception): def 
create_forecasting_jobs( - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], old_sensor_id: int, start_of_roll: datetime, end_of_roll: datetime, @@ -101,7 +98,6 @@ def create_forecasting_jobs( make_rolling_viewpoint_forecasts, kwargs=dict( old_sensor_id=old_sensor_id, - timed_value_type=timed_value_type, horizon=horizon, start=start_of_roll + horizon, end=end_of_roll + horizon, @@ -124,7 +120,6 @@ def create_forecasting_jobs( def make_fixed_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -142,7 +137,6 @@ def make_fixed_viewpoint_forecasts( def make_rolling_viewpoint_forecasts( old_sensor_id: int, - timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]], horizon: timedelta, start: datetime, end: datetime, @@ -159,8 +153,6 @@ def make_rolling_viewpoint_forecasts( ---------- :param old_sensor_id: int To identify which old sensor to forecast (note: old_sensor_id == sensor_id) - :param timed_value_type: Type[Union[TimedBelief, Power, Price, Weather]] - This should go away after a refactoring - we now use it to create the DB entry for the forecasts :param horizon: timedelta duration between the end of each interval and the time at which the belief about that interval is formed :param start: datetime @@ -198,7 +190,6 @@ def make_rolling_viewpoint_forecasts( model_configurator = lookup_model_specs_configurator(model_search_term) model_specs, model_identifier, fallback_model_search_term = model_configurator( sensor=sensor, - time_series_class=timed_value_type, forecast_start=as_server_time(start), forecast_end=as_server_time(end), forecast_horizon=horizon, @@ -224,7 +215,7 @@ def make_rolling_viewpoint_forecasts( ) check_data_availability( sensor, - timed_value_type, + TimedBelief, start, end, query_window, @@ -245,8 +236,7 @@ def make_rolling_viewpoint_forecasts( click.echo("Job %s made %d forecasts." 
% (rq_job.id, len(forecasts))) ts_value_forecasts = [ - timed_value_type( - use_legacy_kwargs=False, + TimedBelief( event_start=dt, belief_horizon=horizon, event_value=value, @@ -255,20 +245,8 @@ def make_rolling_viewpoint_forecasts( ) for dt, value in forecasts.items() ] - - try: - save_to_session(ts_value_forecasts) - except IntegrityError as e: - - current_app.logger.warning(e) - click.echo("Rolling back due to IntegrityError") - db.session.rollback() - - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - click.echo("Saving again, with overwrite=True") - save_to_session(ts_value_forecasts, overwrite=True) - - db.session.commit() + bdf = tb.BeliefsDataFrame(ts_value_forecasts) + save_to_db(bdf) return len(forecasts) diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index d4a7f8247..28b9459cd 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -31,7 +31,7 @@ assets_share_location, ) from flexmeasures.data.models.markets import Market, Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.weather import Weather, WeatherSensorType from flexmeasures.data.models.user import User from flexmeasures.data.queries.utils import simplify_index @@ -484,8 +484,8 @@ def load_sensor_data( ) # Query the sensors - resource_data: Dict[str, tb.BeliefsDataFrame] = sensor_type.search( - old_sensor_names=list(names_of_resource_sensors), + resource_data: Dict[str, tb.BeliefsDataFrame] = TimedBelief.search( + list(names_of_resource_sensors), event_starts_after=start, event_ends_before=end, horizons_at_least=belief_horizon_window[0], diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 10928f2ae..d80150a49 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -8,14 +8,14 @@ import pytz from rq import get_current_job from rq.job import Job -from sqlalchemy.exc import IntegrityError +import timely_beliefs as tb +from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.utils import save_to_session, get_data_source +from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.utils import get_data_source """ The life cycle of a scheduling job: @@ -145,8 +145,7 @@ def make_schedule( click.echo("Job %s made schedule." % rq_job.id) ts_value_schedule = [ - Power( - use_legacy_kwargs=False, + TimedBelief( event_start=dt, belief_horizon=dt.astimezone(pytz.utc) - belief_time.astimezone(pytz.utc), event_value=-value, @@ -155,20 +154,8 @@ def make_schedule( ) for dt, value in consumption_schedule.items() ] # For consumption schedules, positive values denote consumption. 
For the db, consumption is negative - - try: - save_to_session(ts_value_schedule) - except IntegrityError as e: - - current_app.logger.warning(e) - click.echo("Rolling back due to IntegrityError") - db.session.rollback() - - if current_app.config.get("FLEXMEASURES_MODE", "") == "play": - click.echo("Saving again, with overwrite=True") - save_to_session(ts_value_schedule, overwrite=True) - - db.session.commit() + bdf = tb.BeliefsDataFrame(ts_value_schedule) + save_to_db(bdf) return True diff --git a/flexmeasures/data/tests/test_forecasting_jobs.py b/flexmeasures/data/tests/test_forecasting_jobs.py index bf007a459..df839f913 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs.py +++ b/flexmeasures/data/tests/test_forecasting_jobs.py @@ -7,8 +7,7 @@ from rq.job import Job from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, @@ -18,7 +17,7 @@ def custom_model_params(): - """ little training as we have little data, turn off transformations until they let this test run (TODO) """ + """little training as we have little data, turn off transformations until they let this test run (TODO)""" return dict( training_and_testing_period=timedelta(hours=2), outcome_var_transformation=None, @@ -39,12 +38,12 @@ def check_aggregate(overall_expected: int, horizon: timedelta, sensor_id: int): """Check that the expected number of forecasts were made for the given horizon, and check that each forecast is a number.""" all_forecasts = ( - Power.query.filter(Power.sensor_id == sensor_id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor_id) + .filter(TimedBelief.belief_horizon == horizon) .all() ) assert len(all_forecasts) == overall_expected - assert all([not np.isnan(f.value) for f in all_forecasts]) + assert all([not np.isnan(f.event_value) for f in all_forecasts]) def test_forecasting_an_hour_of_wind(db, app, setup_test_data): @@ -59,7 +58,6 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): # makes 4 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 6)), end_of_roll=as_server_time(datetime(2015, 1, 1, 7)), horizons=[horizon], @@ -74,11 +72,11 @@ def test_forecasting_an_hour_of_wind(db, app, setup_test_data): assert get_data_source() is not None forecasts = ( - Power.query.filter(Power.sensor_id == wind_device_1.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 7))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 8))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 7))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 8))) ) .all() ) @@ -91,18 +89,17 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ last_power_datetime = ( ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == timedelta(hours=0)) - .order_by(Power.datetime.desc()) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == timedelta(hours=0)) 
+ .order_by(TimedBelief.event_start.desc()) ) .first() - .datetime + .event_start ) # datetime index of the last power value 11.45pm (Jan 1st) # makes 4 forecasts, 1 of which is for a new datetime index horizon = timedelta(hours=6) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=last_power_datetime - horizon - timedelta(minutes=30), # start of data on which forecast is based (5.15pm) @@ -120,9 +117,9 @@ def test_forecasting_two_hours_of_solar_at_edge_of_data_set(db, app, setup_test_ work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == horizon) - .filter(Power.datetime > last_power_datetime) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == horizon) + .filter(TimedBelief.event_start > last_power_datetime) .all() ) assert len(forecasts) == 1 @@ -176,7 +173,6 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) (Power data is in 2015)""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2016, 1, 1, 20)), end_of_roll=as_server_time(datetime(2016, 1, 1, 22)), horizons=[timedelta(hours=1)], @@ -188,10 +184,9 @@ def test_failed_forecasting_insufficient_data(app, clean_redis, setup_test_data) def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): - """ This one (as well as the fallback) should fail as the horizon is invalid.""" + """This one (as well as the fallback) should fail as the horizon is invalid.""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 21)), end_of_roll=as_server_time(datetime(2015, 1, 1, 23)), horizons=[timedelta(hours=18)], @@ -203,7 +198,7 @@ def test_failed_forecasting_invalid_horizon(app, clean_redis, setup_test_data): def test_failed_unknown_model(app, clean_redis, setup_test_data): - """ This one should fail because we use a model search term which yields no model configurator.""" + """This one should fail because we use a model search term which yields no model configurator.""" solar_device1: Sensor = Sensor.query.filter_by(name="solar-asset-1").one_or_none() horizon = timedelta(hours=1) @@ -211,7 +206,6 @@ def test_failed_unknown_model(app, clean_redis, setup_test_data): cmp["training_and_testing_period"] = timedelta(days=365) create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], diff --git a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py index 27ff1df45..b21a8f959 100644 --- a/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_forecasting_jobs_fresh_db.py @@ -3,8 +3,7 @@ import pytest from sqlalchemy.orm import Query -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.forecasting import ( create_forecasting_jobs, handle_forecasting_exception, @@ -25,7 +24,6 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis # makes 12 forecasts 
horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 10)), end_of_roll=as_server_time(datetime(2015, 1, 1, 13)), horizons=[horizon], @@ -37,11 +35,11 @@ def test_forecasting_three_hours_of_wind(app, setup_fresh_test_data, clean_redis work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.sensor_id == wind_device2.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == wind_device2.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 11))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 14))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 11))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 14))) ) .all() ) @@ -58,7 +56,6 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) # makes 8 forecasts horizon = timedelta(hours=1) job = create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, 12)), end_of_roll=as_server_time(datetime(2015, 1, 1, 14)), horizons=[horizon], @@ -69,11 +66,11 @@ def test_forecasting_two_hours_of_solar(app, setup_fresh_test_data, clean_redis) work_on_rq(app.queues["forecasting"], exc_handler=handle_forecasting_exception) forecasts = ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == horizon) .filter( - (Power.datetime >= as_server_time(datetime(2015, 1, 1, 13))) - & (Power.datetime < as_server_time(datetime(2015, 1, 1, 15))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 1, 1, 13))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 1, 1, 15))) ) .all() ) @@ -106,7 +103,6 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( # The failed test model (this failure enqueues a new job) create_forecasting_jobs( - timed_value_type=Power, start_of_roll=as_server_time(datetime(2015, 1, 1, hour_start)), end_of_roll=as_server_time(datetime(2015, 1, 1, hour_start + 2)), horizons=[horizon], @@ -127,17 +123,17 @@ def test_failed_model_with_too_much_training_then_succeed_with_fallback( def make_query(the_horizon_hours: int) -> Query: the_horizon = timedelta(hours=the_horizon_hours) return ( - Power.query.filter(Power.sensor_id == solar_device1.id) - .filter(Power.horizon == the_horizon) + TimedBelief.query.filter(TimedBelief.sensor_id == solar_device1.id) + .filter(TimedBelief.belief_horizon == the_horizon) .filter( ( - Power.datetime + TimedBelief.event_start >= as_server_time( datetime(2015, 1, 1, hour_start + the_horizon_hours) ) ) & ( - Power.datetime + TimedBelief.event_start < as_server_time( datetime(2015, 1, 1, hour_start + the_horizon_hours + 2) ) @@ -155,7 +151,7 @@ def make_query(the_horizon_hours: int) -> Query: existing_data = make_query(the_horizon_hours=0).all() for ed, fd in zip(existing_data, forecasts): - assert ed.value == fd.value + assert ed.event_value == fd.event_value # Now to check which models actually got to work. 
# We check which data sources do and do not exist by now: diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 0605fa3e0..bf9696c5f 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -6,7 +6,6 @@ import pytz import timely_beliefs as tb -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.queries.utils import ( @@ -41,19 +40,25 @@ ) def test_collect_power(db, app, query_start, query_end, num_values, setup_test_data): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - data = Power.query.filter(Power.sensor_id == wind_device_1.id).all() + data = TimedBelief.query.filter(TimedBelief.sensor_id == wind_device_1.id).all() print(data) - bdf: tb.BeliefsDataFrame = Power.search(wind_device_1.name, query_start, query_end) + bdf: tb.BeliefsDataFrame = TimedBelief.search( + wind_device_1.name, + event_starts_after=query_start, + event_ends_before=query_end, + ) print(bdf) assert ( bdf.index.names[0] == "event_start" ) # first index level of collect function should be event_start, so that df.loc[] refers to event_start assert pd.api.types.is_timedelta64_dtype( - bdf.index.get_level_values("belief_horizon") + bdf.convert_index_from_belief_time_to_horizon().index.get_level_values( + "belief_horizon" + ) ) # dtype of belief_horizon is timedelta64[ns], so the minimum horizon on an empty BeliefsDataFrame is NaT instead of NaN assert len(bdf) == num_values - for v1, v2 in zip(bdf.values, data): - assert abs(v1[0] - v2.value) < 10 ** -6 + for v1, v2 in zip(bdf["event_value"].tolist(), data): + assert abs(v1 - v2.event_value) < 10 ** -6 @pytest.mark.parametrize( @@ -89,8 +94,12 @@ def test_collect_power_resampled( db, app, query_start, query_end, resolution, num_values, setup_test_data ): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.search( - wind_device_1.name, query_start, query_end, resolution=resolution + bdf: tb.BeliefsDataFrame = TimedBelief.search( + wind_device_1.name, + event_starts_after=query_start, + event_ends_before=query_end, + resolution=resolution, + most_recent_beliefs_only=True, ) print(bdf) assert len(bdf) == num_values @@ -206,10 +215,10 @@ def test_multiplication_with_both_empty_dataframe(): def test_simplify_index(setup_test_data, check_empty_frame): """Check whether simplify_index retains the event resolution.""" wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() - bdf: tb.BeliefsDataFrame = Power.search( + bdf: tb.BeliefsDataFrame = TimedBelief.search( wind_device_1.name, - datetime(2015, 1, 1, tzinfo=pytz.utc), - datetime(2015, 1, 2, tzinfo=pytz.utc), + event_starts_after=datetime(2015, 1, 1, tzinfo=pytz.utc), + event_ends_before=datetime(2015, 1, 2, tzinfo=pytz.utc), resolution=timedelta(minutes=15), ) if check_empty_frame: diff --git a/flexmeasures/data/tests/test_scheduling_jobs.py b/flexmeasures/data/tests/test_scheduling_jobs.py index 6ca2f1c02..55add185b 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs.py +++ b/flexmeasures/data/tests/test_scheduling_jobs.py @@ -2,8 +2,7 @@ from datetime import datetime, timedelta from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.assets import Power -from flexmeasures.data.models.time_series import Sensor +from 
flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.utils.time_utils import as_server_time @@ -41,9 +40,9 @@ def test_scheduling_a_battery(db, app, add_battery_assets, setup_test_data): ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == battery.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == battery.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) - print([v.value for v in power_values]) + print([v.event_value for v in power_values]) assert len(power_values) == 96 diff --git a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py index 499760ed3..9b9efbddd 100644 --- a/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py +++ b/flexmeasures/data/tests/test_scheduling_jobs_fresh_db.py @@ -3,9 +3,8 @@ import numpy as np import pandas as pd -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.scheduling import create_scheduling_job from flexmeasures.data.tests.utils import work_on_rq, exception_reporter from flexmeasures.utils.time_utils import as_server_time @@ -63,13 +62,13 @@ def test_scheduling_a_charging_station( ) # Make sure the scheduler data source is now there power_values = ( - Power.query.filter(Power.sensor_id == charging_station.id) - .filter(Power.data_source_id == scheduler_source.id) + TimedBelief.query.filter(TimedBelief.sensor_id == charging_station.id) + .filter(TimedBelief.source_id == scheduler_source.id) .all() ) consumption_schedule = pd.Series( - [-v.value for v in power_values], - index=pd.DatetimeIndex([v.datetime for v in power_values]), + [-v.event_value for v in power_values], + index=pd.DatetimeIndex([v.event_start for v in power_values]), ) # For consumption schedules, positive values denote consumption. 
For the db, consumption is negative assert len(consumption_schedule) == 96 print(consumption_schedule.head(12)) diff --git a/flexmeasures/data/tests/test_user_services.py b/flexmeasures/data/tests/test_user_services.py index 37e4a0bd1..a9f856c84 100644 --- a/flexmeasures/data/tests/test_user_services.py +++ b/flexmeasures/data/tests/test_user_services.py @@ -9,8 +9,9 @@ delete_user, InvalidFlexMeasuresUser, ) -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief def test_create_user( @@ -89,7 +90,9 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): ).all() asset_ids = [asset.id for asset in user_assets_with_measurements_before] for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() + num_power_measurements = TimedBelief.query.filter( + TimedBelief.sensor_id == asset_id + ).count() assert num_power_measurements == 96 delete_user(prosumer) assert find_user_by_email("test_prosumer_user@seita.nl") is None @@ -97,5 +100,7 @@ def test_delete_user(fresh_db, setup_roles_users_fresh_db, app): assert len(user_assets_after) == 0 assert User.query.count() == num_users_before - 1 for asset_id in asset_ids: - num_power_measurements = Power.query.filter(Power.sensor_id == asset_id).count() + num_power_measurements = TimedBelief.query.filter( + TimedBelief.sensor_id == asset_id + ).count() assert num_power_measurements == 0 diff --git a/flexmeasures/ui/charts/latest_state.py b/flexmeasures/ui/charts/latest_state.py index 8422c5e57..1d34d5f83 100644 --- a/flexmeasures/ui/charts/latest_state.py +++ b/flexmeasures/ui/charts/latest_state.py @@ -34,14 +34,15 @@ def get_latest_power_as_plot(sensor: Sensor, small: bool = False) -> Tuple[str, latest_power = sensor.latest_state() if not latest_power.empty: - # TODO: Get first entry - latest_power_value = latest_power.event_value + latest_power_value = latest_power["event_value"].values[0] if current_app.config.get("FLEXMEASURES_MODE", "") == "demo": - latest_power_datetime = latest_power.belief_time.replace( - year=datetime.now().year + latest_power_datetime = ( + latest_power.event_ends[0] + .to_pydatetime() + .replace(year=datetime.now().year) ) else: - latest_power_datetime = latest_power.belief_time + latest_power_datetime = latest_power.event_ends[0].to_pydatetime() latest_measurement_time_str = localized_datetime_str( latest_power_datetime + sensor.event_resolution ) diff --git a/requirements/app.in b/requirements/app.in index 8b791b450..fa3961a06 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -32,7 +32,7 @@ netCDF4 siphon tables timetomodel>=0.7.1 -timely-beliefs>=1.8.0 +timely-beliefs>=1.9.0 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index 3b2eaa204..95836d9f3 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -354,7 +354,7 @@ tables==3.6.1 # via -r requirements/app.in threadpoolctl==3.0.0 # via scikit-learn -timely-beliefs==1.8.0 +timely-beliefs==1.9.0 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in From 4fb564be3f5f9c35551e504bd5dae037a238827b Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 29 Dec 2021 22:42:21 +0100 Subject: [PATCH 23/46] Sub issue 284c migrate power/price/weather data to timed belief (#288) Copy over 
all time series data, in batches. * Query TimedBelief rather than Power in api v1.3 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1.3 implementations Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in user services tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in query tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in forecasting tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in scheduling tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 tests Signed-off-by: F.N. Claessen * Simplify data deletion, like, by a lot Signed-off-by: F.N. Claessen * Count ex-ante TimedBeliefs after populating time series forecasts Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price in api v1_1 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in Resource.load_sensor_data Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in api v2.0 tests Signed-off-by: F.N. Claessen * Refactor: simplify duplicate query construction Signed-off-by: F.N. Claessen * Add custom join target to get rid of SA warning Signed-off-by: F.N. Claessen * Filter criteria should work for both TimedBeliefs and TimedValues Signed-off-by: F.N. Claessen * Clarify docstring Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 implementations Signed-off-by: F.N. Claessen * Schedules should contain one deterministic belief per event Signed-off-by: F.N. Claessen * Fix type annotation Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price/Weather for analytics Signed-off-by: F.N. Claessen * Query deterministic TimedBelief rather than Price for planning queries Signed-off-by: F.N. Claessen * Forecast TimedBelief rather than Power/Price/Weather Signed-off-by: F.N. Claessen * Schedule TimedBelief rather than Power Signed-off-by: F.N. Claessen * Apparently, to initialize a TimedBelief is to save a TimedBelief, too Signed-off-by: F.N. Claessen * Create TimedBelief rather than Power/Price/Weather in data generation script Signed-off-by: F.N. Claessen * Bump timely-beliefs dependency Signed-off-by: F.N. Claessen * Start database migration file and copy time series data Signed-off-by: F.N. Claessen * Remove raise statement used for debugging Signed-off-by: F.N. Claessen * Add changelog entry Signed-off-by: F.N. Claessen --- documentation/changelog.rst | 1 + ..._time_series_data_to_TimedBeliefs_table.py | 122 ++++++++++++++++++ flexmeasures/data/tests/test_queries.py | 4 +- 3 files changed, 125 insertions(+), 2 deletions(-) create mode 100644 flexmeasures/data/migrations/versions/e690d373a3d9_copy_Power_Price_Weather_time_series_data_to_TimedBeliefs_table.py diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 2844d3399..4015730cc 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -23,6 +23,7 @@ Infrastructure / Support * Allow plugins to register their custom config settings, so that FlexMeasures can check whether they are set up correctly [see `PR #230 `_ and `PR #237 `_] * Add sensor method to obtain just its latest state (excl. 
forecasts) [see `PR #235 `_] * Migrate attributes of assets, markets and weather sensors to our new sensor model [see `PR #254 `_ and `project 9 `_] +* Migrate all time series data to our new sensor data model based on the `timely beliefs `_ lib [see `PR #286 `_ and `project 9 `_] v0.7.1 | November 08, 2021 diff --git a/flexmeasures/data/migrations/versions/e690d373a3d9_copy_Power_Price_Weather_time_series_data_to_TimedBeliefs_table.py b/flexmeasures/data/migrations/versions/e690d373a3d9_copy_Power_Price_Weather_time_series_data_to_TimedBeliefs_table.py new file mode 100644 index 000000000..c44782025 --- /dev/null +++ b/flexmeasures/data/migrations/versions/e690d373a3d9_copy_Power_Price_Weather_time_series_data_to_TimedBeliefs_table.py @@ -0,0 +1,122 @@ +"""Copy Power/Price/Weather time series data to TimedBeliefs table + +Revision ID: e690d373a3d9 +Revises: 830e72a8b218 +Create Date: 2021-12-27 15:01:38.967237 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "e690d373a3d9" +down_revision = "830e72a8b218" +branch_labels = None +depends_on = None + + +def upgrade(): + print("Database migration started") + print("- preparing to copy time series data...") + + # Declare ORM table views + t_power = sa.Table( + "power", + sa.MetaData(), + sa.Column("sensor_id"), + sa.Column("datetime"), + sa.Column("horizon"), + sa.Column("value"), + sa.Column("data_source_id"), + ) + t_price = sa.Table( + "price", + sa.MetaData(), + sa.Column("sensor_id"), + sa.Column("datetime"), + sa.Column("horizon"), + sa.Column("value"), + sa.Column("data_source_id"), + ) + t_weather = sa.Table( + "weather", + sa.MetaData(), + sa.Column("sensor_id"), + sa.Column("datetime"), + sa.Column("horizon"), + sa.Column("value"), + sa.Column("data_source_id"), + ) + t_timed_belief = sa.Table( + "timed_belief", + sa.MetaData(), + sa.Column("sensor_id"), + sa.Column("event_start"), + sa.Column("belief_horizon"), + sa.Column("event_value"), + sa.Column("cumulative_probability"), + sa.Column("source_id"), + ) + + # Use SQLAlchemy's connection and transaction to go through the data + connection = op.get_bind() + + copy_time_series_data( + connection, + t_price, + t_timed_belief, + ) + copy_time_series_data( + connection, + t_power, + t_timed_belief, + ) + copy_time_series_data( + connection, + t_weather, + t_timed_belief, + ) + print("- finished copying time series data...") + + +def downgrade(): + pass + + +def copy_time_series_data( + connection, + t_old_data_model, + t_timed_belief, + batch_size: int = 100000, +): + mapping = { + "value": "event_value", + "data_source_id": "source_id", + "datetime": "event_start", + "horizon": "belief_horizon", + "sensor_id": "sensor_id", + } + + # Get data from old data model + results = connection.execute( + sa.select([getattr(t_old_data_model.c, a) for a in mapping.keys()]) + ).fetchall() + + print( + f"- copying {len(results)} rows from the {t_old_data_model.name} table to the {t_timed_belief.name} table..." 
+ )
+
+    # Copy in batches and report on progress
+    for i in range(len(results) // batch_size + 1):
+        if i > 0:
+            print(f"  - done copying {i*batch_size} rows...")
+
+        insert_values = []
+        for values in results[i * batch_size : (i + 1) * batch_size]:
+            d = {k: v for k, v in zip(mapping.values(), values)}
+            d["cumulative_probability"] = 0.5
+            insert_values.append(d)
+        op.bulk_insert(t_timed_belief, insert_values)
+
+    print(f"  - finished copying {len(results)} rows...") diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index bf9696c5f..1f4232753 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -90,7 +90,7 @@ def test_collect_power(db, app, query_start, query_end, num_values, setup_test_d ), ], ) -def test_collect_power_resampled( +def test_collect_power_resampled( db, app, query_start, query_end, resolution, num_values, setup_test_data ): wind_device_1 = Sensor.query.filter_by(name="wind-asset-1").one_or_none() @@ -220,7 +220,7 @@ def test_simplify_index(setup_test_data, check_empty_frame): event_starts_after=datetime(2015, 1, 1, tzinfo=pytz.utc), event_ends_before=datetime(2015, 1, 2, tzinfo=pytz.utc), resolution=timedelta(minutes=15), - ) + ).convert_index_from_belief_time_to_horizon() if check_empty_frame: # We empty the BeliefsDataFrame, which retains the metadata such as sensor and resolution bdf = bdf.iloc[0:0, :] From 2414292c9c4ee159982c5beabdcd8ac99339139b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Mon, 3 Jan 2022 12:14:13 +0100 Subject: [PATCH 24/46] Unit tooling improvements (#293) * check for validity of unit strings to avoid crashing; use a variable name which is usable with pdb debugging; remove deprecation warning about the .getitem method in UnitRegistry * fix tests --- flexmeasures/utils/tests/test_unit_utils.py | 13 ++++-- flexmeasures/utils/unit_utils.py | 49 +++++++++++++++------ 2 files changed, 45 insertions(+), 17 deletions(-) diff --git a/flexmeasures/utils/tests/test_unit_utils.py b/flexmeasures/utils/tests/test_unit_utils.py index bbe9dbfcd..ed940ccc4 100644 --- a/flexmeasures/utils/tests/test_unit_utils.py +++ b/flexmeasures/utils/tests/test_unit_utils.py @@ -8,7 +8,7 @@ units_are_convertible, is_energy_unit, is_power_unit, - u, + ur, ) @@ -61,10 +61,10 @@ def test_determine_unit_conversion_multiplier(): def test_h_denotes_hour_and_not_planck_constant(): - assert u.Quantity("h").dimensionality == u.Quantity("hour").dimensionality + assert ur.Quantity("h").dimensionality == ur.Quantity("hour").dimensionality assert ( - u.Quantity("hbar").dimensionality - == u.Quantity("planck_constant").dimensionality + ur.Quantity("hbar").dimensionality + == ur.Quantity("planck_constant").dimensionality ) @@ -80,6 +80,7 @@ def test_units_are_convertible(): assert units_are_convertible("°C", "K") # offset unit to absolute unit assert not units_are_convertible("°C", "W") assert not units_are_convertible("EUR/MWh", "W") + assert not units_are_convertible("not-a-unit", "W") @pytest.mark.parametrize( @@ -91,6 +92,8 @@ def test_units_are_convertible(): ("kW", True), ("watt", True), ("°C", False), + ("", False), + ("not-a-unit", False), ], ) def test_is_power_unit(unit: str, power_unit: bool): @@ -106,6 +109,8 @@ def test_is_power_unit(unit: str, power_unit: bool): ("kW", False), ("watthour", True), ("°C", False), + ("", False), + ("not-a-unit", False), ], ) def test_is_energy_unit(unit: str, energy_unit: bool): diff --git a/flexmeasures/utils/unit_utils.py 
b/flexmeasures/utils/unit_utils.py index 390249996..f119c2211 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -29,8 +29,8 @@ ) # Set up UnitRegistry with abbreviated scientific format -u = pint.UnitRegistry(full_template) -u.default_format = "~P" # short pretty +ur = pint.UnitRegistry(full_template) +ur.default_format = "~P" # short pretty PREFERRED_UNITS = [ @@ -47,7 +47,9 @@ "A", "dimensionless", ] # todo: move to config setting, with these as a default (NB prefixes do not matter here, this is about SI base units, so km/h is equivalent to m/h) -PREFERRED_UNITS_DICT = dict([(u[x].dimensionality, x) for x in PREFERRED_UNITS]) +PREFERRED_UNITS_DICT = dict( + [(ur.parse_expression(x).dimensionality, x) for x in PREFERRED_UNITS] +) def to_preferred(x: pint.Quantity) -> pint.Quantity: @@ -58,14 +60,27 @@ def to_preferred(x: pint.Quantity) -> pint.Quantity: return x +def is_valid_unit(unit: str) -> bool: + """Return True if the pint library can work with this unit identifier.""" + try: + ur.Quantity(unit) + except ValueError: + return False + except pint.errors.UndefinedUnitError: + return False + return True + + def determine_unit_conversion_multiplier( from_unit: str, to_unit: str, duration: Optional[timedelta] = None ): """Determine the value multiplier for a given unit conversion. If needed, requires a duration to convert from units of stock change to units of flow. """ - scalar = u.Quantity(from_unit).to_base_units() / u.Quantity(to_unit).to_base_units() - if scalar.dimensionality == u.Quantity("h").dimensionality: + scalar = ( + ur.Quantity(from_unit).to_base_units() / ur.Quantity(to_unit).to_base_units() + ) + if scalar.dimensionality == ur.Quantity("h").dimensionality: if duration is None: raise ValueError( f"Cannot convert units from {from_unit} to {to_unit} without known duration." 
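As a usage sketch (not part of this patch): the helpers changed above can be exercised as below, assuming determine_unit_conversion_multiplier returns a plain float multiplier (its return statement is outside the hunks shown here); only conversions between stock changes and flows need a duration.

from datetime import timedelta

from flexmeasures.utils.unit_utils import (
    determine_unit_conversion_multiplier,
    is_valid_unit,
)

is_valid_unit("kW")          # True
is_valid_unit("not-a-unit")  # False (returns False instead of crashing, per this commit)

# A dimensionless ratio needs no duration:
determine_unit_conversion_multiplier("MW", "kW")  # 1000.0

# Converting a stock change (kWh) to a flow (kW) needs the event duration:
determine_unit_conversion_multiplier("kWh", "kW", duration=timedelta(minutes=15))  # 4.0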
@@ -79,7 +94,7 @@ def determine_flow_unit(stock_unit: str, time_unit: str = "h"): >>> determine_flow_unit("m³") # m³/h >>> determine_flow_unit("kWh") # kW """ - flow = to_preferred(u.Quantity(stock_unit) / u.Quantity(time_unit)) + flow = to_preferred(ur.Quantity(stock_unit) / ur.Quantity(time_unit)) return "{:~P}".format(flow.units) @@ -88,7 +103,7 @@ def determine_stock_unit(flow_unit: str, time_unit: str = "h"): >>> determine_stock_unit("m³/h") # m³ >>> determine_stock_unit("kW") # kWh """ - stock = to_preferred(u.Quantity(flow_unit) * u.Quantity(time_unit)) + stock = to_preferred(ur.Quantity(flow_unit) * ur.Quantity(time_unit)) return "{:~P}".format(stock.units) @@ -101,13 +116,17 @@ def units_are_convertible( >>> units_are_convertible("Wh", "W") # True (units that represent a stock delta can, knowing the duration, be converted to a flow) >>> units_are_convertible("°C", "W") # False """ - scalar = u.Quantity(from_unit).to_base_units() / u.Quantity(to_unit).to_base_units() + if not is_valid_unit(from_unit) or not is_valid_unit(to_unit): + return False + scalar = ( + ur.Quantity(from_unit).to_base_units() / ur.Quantity(to_unit).to_base_units() + ) if duration_known: return scalar.dimensionality in ( - u.Quantity("h").dimensionality, - u.Quantity("dimensionless").dimensionality, + ur.Quantity("h").dimensionality, + ur.Quantity("dimensionless").dimensionality, ) - return scalar.dimensionality == u.Quantity("dimensionless").dimensionality + return scalar.dimensionality == ur.Quantity("dimensionless").dimensionality def is_power_unit(unit: str) -> bool: @@ -117,7 +136,9 @@ def is_power_unit(unit: str) -> bool: >>> is_power_unit("kWh") # False >>> is_power_unit("EUR/MWh") # False """ - return u.Quantity(unit).dimensionality == u.Quantity("W").dimensionality + if not is_valid_unit(unit): + return False + return ur.Quantity(unit).dimensionality == ur.Quantity("W").dimensionality def is_energy_unit(unit: str) -> bool: @@ -127,4 +148,6 @@ def is_energy_unit(unit: str) -> bool: >>> is_energy_unit("kWh") # True >>> is_energy_unit("EUR/MWh") # False """ - return u.Quantity(unit).dimensionality == u.Quantity("Wh").dimensionality + if not is_valid_unit(unit): + return False + return ur.Quantity(unit).dimensionality == ur.Quantity("Wh").dimensionality From 5ba1dc74cfd1c8ca23bb56facba7d0206b8f465c Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 3 Jan 2022 13:56:33 +0100 Subject: [PATCH 25/46] Sub issue 284d stop saving to power/price/weather tables (#289) Switch all occurrences of initializing a Power, Price or Weather object within FlexMeasures to initializing a TimedBelief instead. Also refactor the save_to_db method, moving it from the api package to the data package. Because some FlexMeasures plugins are known to depend on direct initialization of Power, Price and Weather objects, and some plugins are known to use the save_to_db method from the api package, the old functionality is left intact, with any new data automatically copied to the new model, with the appropriate deprecation warnings. * Query TimedBelief rather than Power in api v1.3 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1.3 implementations Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in user services tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in query tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in forecasting tests Signed-off-by: F.N. 
Claessen * Query TimedBelief rather than Power in scheduling tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 tests Signed-off-by: F.N. Claessen * Simplify data deletion, like, by a lot Signed-off-by: F.N. Claessen * Count ex-ante TimedBeliefs after populating time series forecasts Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price in api v1_1 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in Resource.load_sensor_data Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in api v2.0 tests Signed-off-by: F.N. Claessen * Refactor: simplify duplicate query construction Signed-off-by: F.N. Claessen * Add custom join target to get rid of SA warning Signed-off-by: F.N. Claessen * Filter criteria should work for both TimedBeliefs and TimedValues Signed-off-by: F.N. Claessen * Clarify docstring Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 implementations Signed-off-by: F.N. Claessen * Schedules should contain one deterministic belief per event Signed-off-by: F.N. Claessen * Fix type annotation Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price/Weather for analytics Signed-off-by: F.N. Claessen * Query deterministic TimedBelief rather than Price for planning queries Signed-off-by: F.N. Claessen * Forecast TimedBelief rather than Power/Price/Weather Signed-off-by: F.N. Claessen * Schedule TimedBelief rather than Power Signed-off-by: F.N. Claessen * Apparently, to initialize a TimedBelief is to save a TimedBelief, too Signed-off-by: F.N. Claessen * Create TimedBelief rather than Power/Price/Weather in data generation script Signed-off-by: F.N. Claessen * Bump timely-beliefs dependency Signed-off-by: F.N. Claessen * Fix latest state query Signed-off-by: F.N. Claessen * Revert "Apparently, to initialize a TimedBelief is to save a TimedBelief, too" This reverts commit fb58ec7459dd69c571bee27cdce61e67c14617ae. * Prevent saving TimedBelief to session upon updating Sensor or Source Signed-off-by: F.N. Claessen * Create only TimedBeliefs in conftests Signed-off-by: F.N. Claessen * Use session.add_all calls instead of session.bulk_save_objects or individual session.add calls Signed-off-by: F.N. Claessen * API directly creates TimedBeliefs Signed-off-by: F.N. Claessen * CLI uses TimedBeliefs only Signed-off-by: F.N. Claessen * Data scripts use TimedBeliefs only Signed-off-by: F.N. Claessen * One more conftest switched to creating TimedBeliefs instead of Weather objects Signed-off-by: F.N. Claessen * Expand docstring note on forbidden replacements Signed-off-by: F.N. Claessen * Clarify docstring note on saving changed beliefs only Signed-off-by: F.N. Claessen * Remove redundant flush Signed-off-by: F.N. Claessen * Catch forbidden belief replacements with more specific exception Signed-off-by: F.N. Claessen * Rename variable Signed-off-by: F.N. Claessen * One transaction per request Signed-off-by: F.N. Claessen * Only enqueue forecasting jobs upon successfully saving new data Signed-off-by: F.N. Claessen * Flush instead of commit Signed-off-by: F.N. Claessen * Expand test for forbidden data replacement Signed-off-by: F.N. Claessen * Simplify play mode exemption for replacing beliefs Signed-off-by: F.N. Claessen * Add note about potential session rollback Signed-off-by: F.N. Claessen * Typo Signed-off-by: F.N. Claessen * Move UniqueViolation catching logic to error handler Signed-off-by: F.N.
Claessen * flake8 Signed-off-by: F.N. Claessen * Clean up Signed-off-by: F.N. Claessen * Refactor: move error handler to api_utils.py Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen --- flexmeasures/api/__init__.py | 3 + flexmeasures/api/common/responses.py | 18 ++- flexmeasures/api/common/utils/api_utils.py | 67 ++++++++++- flexmeasures/api/dev/sensor_data.py | 6 +- .../api/dev/tests/test_sensor_data.py | 17 ++- flexmeasures/api/v1/implementations.py | 21 ++-- flexmeasures/api/v1/tests/conftest.py | 21 ++-- flexmeasures/api/v1_1/implementations.py | 32 ++--- flexmeasures/api/v1_1/tests/conftest.py | 27 +++-- .../api/v2_0/implementations/sensors.py | 112 ++++++++---------- flexmeasures/cli/testing.py | 26 +--- flexmeasures/conftest.py | 31 ++--- flexmeasures/data/scripts/grid_weather.py | 6 +- flexmeasures/data/services/forecasting.py | 3 +- flexmeasures/data/services/scheduling.py | 3 +- flexmeasures/data/tests/conftest.py | 17 +-- .../data/tests/test_time_series_services.py | 2 +- flexmeasures/data/utils.py | 88 +++++++++++++- 18 files changed, 328 insertions(+), 172 deletions(-) diff --git a/flexmeasures/api/__init__.py b/flexmeasures/api/__init__.py index e5daa8017..3b104731b 100644 --- a/flexmeasures/api/__init__.py +++ b/flexmeasures/api/__init__.py @@ -2,8 +2,10 @@ from flask_security.utils import verify_password from flask_json import as_json from flask_login import current_user +from sqlalchemy.exc import IntegrityError from flexmeasures import __version__ as flexmeasures_version +from flexmeasures.api.common.utils.api_utils import catch_timed_belief_replacements from flexmeasures.data.models.user import User from flexmeasures.api.common.utils.args_parsing import ( validation_error_handler, @@ -84,6 +86,7 @@ def register_at(app: Flask): # handle API specific errors app.register_error_handler(FMValidationError, validation_error_handler) + app.register_error_handler(IntegrityError, catch_timed_belief_replacements) app.unauthorized_handler_api = invalid_sender app.register_blueprint( diff --git a/flexmeasures/api/common/responses.py b/flexmeasures/api/common/responses.py index 46eab7caa..143512458 100644 --- a/flexmeasures/api/common/responses.py +++ b/flexmeasures/api/common/responses.py @@ -39,11 +39,25 @@ def deprecated_api_version(message: str) -> ResponseTuple: def already_received_and_successfully_processed(message: str) -> ResponseTuple: return ( dict( - results="Rejected", + results="PROCESSED", status="ALREADY_RECEIVED_AND_SUCCESSFULLY_PROCESSED", message=message, ), - 400, + 200, + ) + + +@BaseMessage( + "Some of the data represents a replacement, which is reserved for servers in play mode. Enable play mode or update the prior in your request." 
+) +def invalid_replacement(message: str) -> ResponseTuple: + return ( + dict( + results="Rejected", + status="INVALID_REPLACEMENT", + message=message, + ), + 403, ) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 89d7ac7ef..e793c5117 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -1,5 +1,4 @@ from timely_beliefs.beliefs.classes import BeliefsDataFrame -from flexmeasures.data.models.time_series import TimedBelief from typing import List, Sequence, Tuple, Union import copy from datetime import datetime, timedelta @@ -8,6 +7,7 @@ from flask import current_app from inflection import pluralize from numpy import array +from psycopg2.errors import UniqueViolation from rq.job import Job from sqlalchemy.exc import IntegrityError import timely_beliefs as tb @@ -16,16 +16,18 @@ from flexmeasures.data.models.assets import Asset, Power from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.weather import WeatherSensor, Weather from flexmeasures.data.services.time_series import drop_unchanged_beliefs -from flexmeasures.data.utils import save_to_session +from flexmeasures.data.utils import save_to_session, save_to_db as modern_save_to_db from flexmeasures.api.common.responses import ( + invalid_replacement, unrecognized_sensor, ResponseTuple, request_processed, already_received_and_successfully_processed, ) +from flexmeasures.utils.error_utils import error_handling_router def list_access(service_listing, service_name): @@ -340,6 +342,40 @@ def get_sensor_by_generic_asset_type_and_location( return sensor +def enqueue_forecasting_jobs( + forecasting_jobs: List[Job] = None, +): + """Enqueue forecasting jobs. + + :param forecasting_jobs: list of forecasting Jobs for redis queues. + """ + if forecasting_jobs is not None: + [current_app.queues["forecasting"].enqueue_job(job) for job in forecasting_jobs] + + +def save_and_enqueue( + data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], + forecasting_jobs: List[Job] = None, + save_changed_beliefs_only: bool = True, +) -> ResponseTuple: + + # Attempt to save + status = modern_save_to_db( + data, save_changed_beliefs_only=save_changed_beliefs_only + ) + + # Only enqueue forecasting jobs upon successfully saving new data + if status[:7] == "success": + enqueue_forecasting_jobs(forecasting_jobs) + + # Pick a response + if status == "success": + return request_processed() + elif status == "success_with_unchanged_beliefs_skipped": + return already_received_and_successfully_processed() + return invalid_replacement() + + def save_to_db( timed_values: Union[BeliefsDataFrame, List[Union[Power, Price, Weather]]], forecasting_jobs: List[Job] = [], @@ -349,7 +385,7 @@ def save_to_db( Data can only be replaced on servers in play mode. - TODO: remove options for Power, Price and Weather if we only handle beliefs one day. + TODO: remove this legacy function in its entirety (announced v0.8.0) :param timed_values: BeliefsDataFrame or a list of Power, Price or Weather values to be saved :param forecasting_jobs: list of forecasting Jobs for redis queues. @@ -357,6 +393,14 @@ def save_to_db( :returns: ResponseTuple """ + import warnings + + warnings.warn( + "The method api.common.utils.api_utils.save_to_db is deprecated. 
Check out the following replacements:\n" + "- [recommended option] to store BeliefsDataFrames only, switch to data.utils.save_to_db\n" + "- to store BeliefsDataFrames and enqueue jobs, switch to api.common.utils.api_utils.save_and_enqueue" + ) + if isinstance(timed_values, BeliefsDataFrame): if save_changed_beliefs_only: @@ -450,3 +494,18 @@ def determine_belief_timing( ] return event_starts, belief_horizons raise ValueError("Missing horizon or prior.") + + +def catch_timed_belief_replacements(error: IntegrityError): + """Catch IntegrityErrors due to a UniqueViolation on the TimedBelief primary key. + + Return a more informative message. + """ + if isinstance(error.orig, UniqueViolation) and "timed_belief_pkey" in str( + error.orig + ): + # Some beliefs represented replacements, which was forbidden + return invalid_replacement() + + # Forward to our generic error handler + return error_handling_router(error) diff --git a/flexmeasures/api/dev/sensor_data.py b/flexmeasures/api/dev/sensor_data.py index fbb70f44b..a8dd758f6 100644 --- a/flexmeasures/api/dev/sensor_data.py +++ b/flexmeasures/api/dev/sensor_data.py @@ -1,7 +1,7 @@ from webargs.flaskparser import use_args from flexmeasures.api.common.schemas.sensor_data import SensorDataSchema -from flexmeasures.api.common.utils.api_utils import save_to_db +from flexmeasures.api.common.utils.api_utils import save_and_enqueue @use_args( @@ -15,13 +15,13 @@ def post_data(sensor_data): to create and save the data structure. """ beliefs = SensorDataSchema.load_bdf(sensor_data) - response, code = save_to_db(beliefs) + response, code = save_and_enqueue(beliefs) response.update(type="PostSensorDataResponse") return response, code def get_data(): - """ GET from /sensorData""" + """GET from /sensorData""" # - use data.models.time_series.Sensor::search_beliefs() - might need to add a belief_horizon parameter # - create the serialize method on the schema, to turn the resulting BeliefsDataFrame # to the JSON the API should respond with.
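Taken together, the refactored endpoints converge on a single pattern: build TimedBelief objects, wrap them in a BeliefsDataFrame, and hand the result to save_and_enqueue. A minimal sketch of that pattern, assuming the FlexMeasures modules shown in the diffs above (the helper name post_beliefs and its arguments are illustrative, not part of the codebase):

    import timely_beliefs as tb

    from flexmeasures.api.common.utils.api_utils import save_and_enqueue
    from flexmeasures.data.models.time_series import TimedBelief


    def post_beliefs(
        sensor, source, event_starts, event_values, belief_horizons, forecasting_jobs=None
    ):
        # Build one TimedBelief per event, as the refactored endpoints do
        beliefs = [
            TimedBelief(
                event_start=event_start,
                event_value=event_value,
                belief_horizon=belief_horizon,
                sensor=sensor,
                source=source,
            )
            for event_start, event_value, belief_horizon in zip(
                event_starts, event_values, belief_horizons
            )
        ]
        # save_and_enqueue saves the beliefs and, only upon a "success" status,
        # enqueues the forecasting jobs; it returns a ResponseTuple:
        # 200 (processed), 200 (already received) or 403 (invalid replacement)
        return save_and_enqueue(tb.BeliefsDataFrame(beliefs), forecasting_jobs)

Centralizing the save-and-enqueue decision is what lets v1, v1.1 and v2.0 respond consistently: duplicates now get a 200, forbidden replacements a 403.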
diff --git a/flexmeasures/api/dev/tests/test_sensor_data.py b/flexmeasures/api/dev/tests/test_sensor_data.py index 9138b966e..027d760e2 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data.py +++ b/flexmeasures/api/dev/tests/test_sensor_data.py @@ -65,17 +65,32 @@ def test_post_invalid_sensor_data( def test_post_sensor_data_twice(client, setup_api_test_data): auth_token = get_auth_token(client, "test_prosumer_user@seita.nl", "testtest") post_data = make_sensor_data_request() + + # Check that 1st time posting the data succeeds response = client.post( url_for("post_sensor_data"), json=post_data, headers={"Authorization": auth_token}, ) assert response.status_code == 200 + + # Check that 2nd time posting the same data succeeds informatively response = client.post( url_for("post_sensor_data"), json=post_data, headers={"Authorization": auth_token}, ) print(response.json) - assert response.status_code == 400 + assert response.status_code == 200 assert "data has already been received" in response.json["message"] + + # Check that replacing data fails informatively + post_data["values"][0] = 100 + response = client.post( + url_for("post_sensor_data"), + json=post_data, + headers={"Authorization": auth_token}, + ) + print(response.json) + assert response.status_code == 403 + assert "data represents a replacement" in response.json["message"] diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 2d41991bc..2c81fa3d6 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -11,7 +11,7 @@ parse_entity_address, EntityAddressException, ) -from flexmeasures.data.models.assets import Power +from flexmeasures.data import db from flexmeasures.data.models.data_sources import get_or_create_source from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.resources import get_sensors @@ -26,7 +26,7 @@ ) from flexmeasures.api.common.utils.api_utils import ( groups_to_dict, - save_to_db, + save_and_enqueue, ) from flexmeasures.api.common.utils.validators import ( type_accepted, @@ -253,7 +253,7 @@ def create_connection_and_value_groups( # noqa: C901 if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") user_sensor_ids = [sensor.id for sensor in user_sensors] - power_measurements = [] + power_df_per_connection = [] forecasting_jobs = [] for connection_group, value_group in zip(generic_asset_name_groups, value_groups): for connection in connection_group: @@ -293,7 +293,8 @@ def create_connection_and_value_groups( # noqa: C901 ) return power_value_too_big(extra_info) - # Create new Power objects + # Create a new BeliefsDataFrame + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: @@ -302,8 +303,7 @@ def create_connection_and_value_groups( # noqa: C901 h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - p = Power( - use_legacy_kwargs=False, + p = TimedBelief( event_start=dt, event_value=value * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption @@ -311,7 +311,10 @@ def create_connection_and_value_groups( # noqa: C901 sensor=sensor, source=data_source, ) - power_measurements.append(p) + + assert p not in db.session + beliefs.append(p) + power_df_per_connection.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves if horizon <= timedelta( @@ -323,8 +326,8 @@ def 
create_connection_and_value_groups( # noqa: C901 start, start + duration, resolution=duration / len(value_group), - enqueue=False, + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(power_measurements, forecasting_jobs) + return save_and_enqueue(power_df_per_connection, forecasting_jobs) diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 7803e7088..86880b58f 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -7,6 +7,7 @@ from flask_security.utils import hash_password from flexmeasures.data.services.users import create_user +from flexmeasures.data.models.time_series import TimedBelief @pytest.fixture(scope="module", autouse=True) @@ -16,7 +17,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices """ print("Setting up data for API v1 tests on %s" % db.engine) - from flexmeasures.data.models.assets import Asset, AssetType, Power + from flexmeasures.data.models.assets import Asset, AssetType from flexmeasures.data.models.data_sources import DataSource # Create an anonymous user TODO: used for demo purposes, maybe "demo-user" would be a better name @@ -88,10 +89,8 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices user2_data_source = DataSource.query.filter( DataSource.user == test_user_2 ).one_or_none() - meter_data = [] - for i in range(6): - p_1 = Power( - use_legacy_kwargs=False, + user1_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(0), @@ -99,8 +98,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_5.corresponding_sensor, source=user1_data_source, ) - p_2 = Power( - use_legacy_kwargs=False, + for i in range(6) + ] + user2_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=0), @@ -108,9 +109,9 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_5.corresponding_sensor, source=user2_data_source, ) - meter_data.append(p_1) - meter_data.append(p_2) - db.session.bulk_save_objects(meter_data) + for i in range(6) + ] + db.session.add_all(user1_beliefs + user2_beliefs) print("Done setting up data for API v1 tests") diff --git a/flexmeasures/api/v1_1/implementations.py b/flexmeasures/api/v1_1/implementations.py index c3c08c5bd..9415e6826 100644 --- a/flexmeasures/api/v1_1/implementations.py +++ b/flexmeasures/api/v1_1/implementations.py @@ -4,6 +4,7 @@ from flask import current_app from flask_json import as_json from flask_security import current_user +import timely_beliefs as tb from flexmeasures.utils.entity_address_utils import ( parse_entity_address, @@ -16,7 +17,7 @@ invalid_horizon, ) from flexmeasures.api.common.utils.api_utils import ( - save_to_db, + save_and_enqueue, ) from flexmeasures.api.common.utils.migration_utils import get_sensor_by_unique_name from flexmeasures.api.common.utils.validators import ( @@ -41,8 +42,7 @@ get_sensor_by_generic_asset_type_and_location, ) from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather +from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.services.resources import get_sensors from flexmeasures.data.services.forecasting import 
create_forecasting_jobs @@ -84,7 +84,7 @@ def post_price_data_response( current_app.logger.info("POSTING PRICE DATA") data_source = get_or_create_source(current_user) - prices = [] + price_df_per_market = [] forecasting_jobs = [] for market_group, value_group in zip(generic_asset_name_groups, value_groups): for market in market_group: @@ -105,6 +105,7 @@ def post_price_data_response( return invalid_unit("%s prices" % sensor.name, [sensor.unit]) # Create new Price objects + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: @@ -113,15 +114,15 @@ def post_price_data_response( h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - p = Price( - use_legacy_kwargs=False, + p = TimedBelief( event_start=dt, event_value=value, belief_horizon=h, sensor=sensor, source=data_source, ) - prices.append(p) + beliefs.append(p) + price_df_per_market.append(tb.BeliefsDataFrame(beliefs)) # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts, # by the way, due to things like day-ahead markets. @@ -133,10 +134,10 @@ def post_price_data_response( start + duration, resolution=duration / len(value_group), horizons=[timedelta(hours=24), timedelta(hours=48)], - enqueue=False, # will enqueue later, only if we successfully saved prices + enqueue=False, # will enqueue later, after saving data ) - return save_to_db(prices, forecasting_jobs) + return save_and_enqueue(price_df_per_market, forecasting_jobs) @type_accepted("PostWeatherDataRequest") @@ -160,7 +161,7 @@ def post_weather_data_response( # noqa: C901 current_app.logger.info("POSTING WEATHER DATA") data_source = get_or_create_source(current_user) - weather_measurements = [] + weather_df_per_sensor = [] forecasting_jobs = [] for sensor_group, value_group in zip(generic_asset_name_groups, value_groups): for sensor in sensor_group: @@ -189,6 +190,7 @@ def post_weather_data_response( # noqa: C901 return sensor # Create new Weather objects + beliefs = [] for j, value in enumerate(value_group): dt = start + j * duration / len(value_group) if rolling: @@ -197,15 +199,15 @@ def post_weather_data_response( # noqa: C901 h = horizon - ( (start + duration) - (dt + duration / len(value_group)) ) - w = Weather( - use_legacy_kwargs=False, + w = TimedBelief( event_start=dt, event_value=value, belief_horizon=h, sensor=sensor, source=data_source, ) - weather_measurements.append(w) + beliefs.append(w) + weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play) if current_app.config.get( @@ -219,11 +221,11 @@ def post_weather_data_response( # noqa: C901 start, start + duration, resolution=duration / len(value_group), - enqueue=False, # will enqueue later, only if we successfully saved weather measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(weather_measurements, forecasting_jobs) + return save_and_enqueue(weather_df_per_sensor, forecasting_jobs) @type_accepted("GetPrognosisRequest") diff --git a/flexmeasures/api/v1_1/tests/conftest.py b/flexmeasures/api/v1_1/tests/conftest.py index a63d4eb51..aff57aa0a 100644 --- a/flexmeasures/api/v1_1/tests/conftest.py +++ b/flexmeasures/api/v1_1/tests/conftest.py @@ -6,8 +6,8 @@ from flask_security import SQLAlchemySessionUserDatastore from flask_security.utils import hash_password -from flexmeasures.data.models.assets import Power from 
flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief @pytest.fixture(scope="module") @@ -55,10 +55,8 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices cs_2 = Asset.query.filter(Asset.name == "CS 2").one_or_none() cs_3 = Asset.query.filter(Asset.name == "CS 3").one_or_none() data_source = DataSource.query.filter(DataSource.user == test_user).one_or_none() - power_forecasts = [] - for i in range(6): - p_1 = Power( - use_legacy_kwargs=False, + cs1_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -66,8 +64,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_1.corresponding_sensor, source=data_source, ) - p_2 = Power( - use_legacy_kwargs=False, + for i in range(6) + ] + cs2_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -75,8 +75,10 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_2.corresponding_sensor, source=data_source, ) - p_3 = Power( - use_legacy_kwargs=False, + for i in range(6) + ] + cs3_beliefs = [ + TimedBelief( event_start=isodate.parse_datetime("2015-01-01T00:00:00Z") + timedelta(minutes=15 * i), belief_horizon=timedelta(hours=6), @@ -84,10 +86,9 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices sensor=cs_3.corresponding_sensor, source=data_source, ) - power_forecasts.append(p_1) - power_forecasts.append(p_2) - power_forecasts.append(p_3) - db.session.bulk_save_objects(power_forecasts) + for i in range(6) + ] + db.session.add_all(cs1_beliefs + cs2_beliefs + cs3_beliefs) print("Done setting up data for API v1.1 tests") diff --git a/flexmeasures/api/v2_0/implementations/sensors.py b/flexmeasures/api/v2_0/implementations/sensors.py index ee6ce40a7..eec12b72d 100644 --- a/flexmeasures/api/v2_0/implementations/sensors.py +++ b/flexmeasures/api/v2_0/implementations/sensors.py @@ -3,6 +3,7 @@ from flask import current_app from flask_json import as_json from flask_security import current_user +import timely_beliefs as tb from flexmeasures.api.common.responses import ( invalid_domain, @@ -16,7 +17,7 @@ ) from flexmeasures.api.common.utils.api_utils import ( get_sensor_by_generic_asset_type_and_location, - save_to_db, + save_and_enqueue, determine_belief_timing, ) from flexmeasures.api.common.utils.validators import ( @@ -31,11 +32,8 @@ period_required, values_required, ) -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.data_sources import get_or_create_source -from flexmeasures.data.models.time_series import Sensor -from flexmeasures.data.models.markets import Price -from flexmeasures.data.models.weather import Weather +from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.services.forecasting import create_forecasting_jobs from flexmeasures.data.services.resources import get_sensors from flexmeasures.utils.entity_address_utils import ( @@ -71,7 +69,7 @@ def post_price_data_response( # noqa C901 current_app.logger.info("POSTING PRICE DATA") data_source = get_or_create_source(current_user) - prices = [] + price_df_per_market = [] forecasting_jobs = [] for market_group, event_values in zip(generic_asset_name_groups, value_groups): for market in market_group: @@ -96,21 +94,19 @@ def 
post_price_data_response( # noqa C901 ) # Create new Price objects - prices.extend( - [ - Price( - use_legacy_kwargs=False, - event_start=event_start, - event_value=event_value, - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, belief_horizons + ) + ] + price_df_per_market.append(tb.BeliefsDataFrame(beliefs)) # Make forecasts, but not in play mode. Price forecasts (horizon>0) can still lead to other price forecasts, # by the way, due to things like day-ahead markets. @@ -122,10 +118,10 @@ def post_price_data_response( # noqa C901 start + duration, resolution=duration / len(event_values), horizons=[timedelta(hours=24), timedelta(hours=48)], - enqueue=False, # will enqueue later, only if we successfully saved prices + enqueue=False, # will enqueue later, after saving data ) - return save_to_db(prices, forecasting_jobs) + return save_and_enqueue(price_df_per_market, forecasting_jobs) @type_accepted("PostWeatherDataRequest") @@ -154,7 +150,7 @@ def post_weather_data_response( # noqa: C901 current_app.logger.info("POSTING WEATHER DATA") data_source = get_or_create_source(current_user) - weather_measurements = [] + weather_df_per_sensor = [] forecasting_jobs = [] for sensor_group, event_values in zip(generic_asset_name_groups, value_groups): for sensor in sensor_group: @@ -183,21 +179,19 @@ def post_weather_data_response( # noqa: C901 ) # Create new Weather objects - weather_measurements.extend( - [ - Weather( - use_legacy_kwargs=False, - event_start=event_start, - event_value=event_value, - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value, + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, belief_horizons + ) + ] + weather_df_per_sensor.append(tb.BeliefsDataFrame(beliefs)) # make forecasts, but only if the sent-in values are not forecasts themselves (and also not in play) if current_app.config.get( @@ -212,11 +206,11 @@ def post_weather_data_response( # noqa: C901 start + duration, resolution=duration / len(event_values), horizons=[horizon], - enqueue=False, # will enqueue later, only if we successfully saved weather measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(weather_measurements, forecasting_jobs) + return save_and_enqueue(weather_df_per_sensor, forecasting_jobs) @type_accepted("PostMeterDataRequest") @@ -307,7 +301,7 @@ def post_power_data( if not user_sensors: current_app.logger.info("User doesn't seem to have any assets") user_sensor_ids = [sensor.id for sensor in user_sensors] - power_measurements = [] + power_df_per_connection = [] forecasting_jobs = [] for connection_group, event_values in zip(generic_asset_name_groups, value_groups): for connection in connection_group: @@ -351,22 +345,20 @@ def post_power_data( ) # Create new Power objects - power_measurements.extend( - [ - Power( - use_legacy_kwargs=False, - event_start=event_start, - 
event_value=event_value - * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption - belief_horizon=belief_horizon, - sensor=sensor, - source=data_source, - ) - for event_start, event_value, belief_horizon in zip( - event_starts, event_values, belief_horizons - ) - ] - ) + beliefs = [ + TimedBelief( + event_start=event_start, + event_value=event_value + * -1, # Reverse sign for FlexMeasures specs with positive production and negative consumption + belief_horizon=belief_horizon, + sensor=sensor, + source=data_source, + ) + for event_start, event_value, belief_horizon in zip( + event_starts, event_values, belief_horizons + ) + ] + power_df_per_connection.append(tb.BeliefsDataFrame(beliefs)) if create_forecasting_jobs_too: forecasting_jobs.extend( @@ -375,8 +367,8 @@ def post_power_data( start, start + duration, resolution=duration / len(event_values), - enqueue=False, # will enqueue later, only if we successfully saved power measurements + enqueue=False, # will enqueue later, after saving data ) ) - return save_to_db(power_measurements, forecasting_jobs) + return save_and_enqueue(power_df_per_connection, forecasting_jobs) diff --git a/flexmeasures/cli/testing.py b/flexmeasures/cli/testing.py index de5621788..f723f6137 100644 --- a/flexmeasures/cli/testing.py +++ b/flexmeasures/cli/testing.py @@ -12,7 +12,6 @@ else: from rq import Worker -from flexmeasures.data.models.assets import Power from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.time_series import TimedBelief from flexmeasures.data.queries.sensors import ( @@ -41,11 +40,11 @@ def test_making_forecasts(): sensor_id = 1 forecast_filter = ( - Power.query.filter(Power.sensor_id == sensor_id) - .filter(Power.horizon == timedelta(hours=6)) + TimedBelief.query.filter(TimedBelief.sensor_id == sensor_id) + .filter(TimedBelief.belief_horizon == timedelta(hours=6)) .filter( - (Power.datetime >= as_server_time(datetime(2015, 4, 1, 6))) - & (Power.datetime < as_server_time(datetime(2015, 4, 3, 6))) + (TimedBelief.event_start >= as_server_time(datetime(2015, 4, 1, 6))) + & (TimedBelief.event_start < as_server_time(datetime(2015, 4, 3, 6))) ) ) @@ -86,12 +85,6 @@ def test_making_forecasts(): required=True, help="Name of generic asset type.", ) -@click.option( - "--timed-value-type", - "timed_value_type", - required=True, - help="Power, Price or Weather.", -) @click.option("--sensor", "sensor_name", help="Name of sensor.") @click.option( "--from_date", @@ -107,7 +100,6 @@ def test_making_forecasts(): ) def test_generic_model( generic_asset_type_names: List[str], - timed_value_type_name: str, sensor_name: Optional[str] = None, from_date: str = "2015-03-10", period: int = 3, @@ -133,16 +125,6 @@ def test_generic_model( click.echo("No unique sensor found in db, so I will not add any forecasts.") return - # todo: replacing this with timed_value_type = TimedBelief requires streamlining of the collect function on old sensor data classes with the search function on the TimedBelief class - if timed_value_type_name.lower() == "Power": - from flexmeasures.data.models.assets import Power as TimedValueType - elif timed_value_type_name.lower() == "Price": - from flexmeasures.data.models.markets import Price as TimedValueType - elif timed_value_type_name.lower() == "Weather": - from flexmeasures.data.models.weather import Weather as TimedValueType - else: - raise ValueError(f"Unknown timed value type {timed_value_type_name}") - linear_model_configurator = 
lookup_model_specs_configurator("linear") ( model_specs, diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 393a863c6..28bbbf0e3 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -23,11 +23,11 @@ from flexmeasures.auth.policy import ADMIN_ROLE from flexmeasures.utils.time_utils import as_server_time from flexmeasures.data.services.users import create_user -from flexmeasures.data.models.assets import AssetType, Asset, Power +from flexmeasures.data.models.assets import AssetType, Asset from flexmeasures.data.models.generic_assets import GenericAssetType, GenericAsset from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.weather import WeatherSensor, WeatherSensorType -from flexmeasures.data.models.markets import Market, MarketType, Price +from flexmeasures.data.models.markets import Market, MarketType from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.user import User, Account, AccountRole @@ -335,16 +335,17 @@ def setup_assets( random() * (1 + np.sin(x * 2 * np.pi / (4 * 24))) for x in range(len(time_slots)) ] - for dt, val in zip(time_slots, values): - p = Power( - use_legacy_kwargs=False, + beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, sensor=asset.corresponding_sensor, source=setup_sources["Seita"], ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(beliefs) return {asset.name: asset for asset in assets} @@ -401,32 +402,34 @@ def add_market_prices(db: SQLAlchemy, setup_assets, setup_markets, setup_sources values = [ random() * (1 + np.sin(x * 2 * np.pi / 24)) for x in range(len(time_slots)) ] - for dt, val in zip(time_slots, values): - p = Price( - use_legacy_kwargs=False, + day1_beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, source=setup_sources["Seita"], sensor=setup_markets["epex_da"].corresponding_sensor, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(day1_beliefs) # another day of test data (8 expensive hours, 8 cheap hours, and again 8 expensive hours) time_slots = pd.date_range( datetime(2015, 1, 2), datetime(2015, 1, 3), freq="1H", closed="left" ) values = [100] * 8 + [90] * 8 + [100] * 8 - for dt, val in zip(time_slots, values): - p = Price( - use_legacy_kwargs=False, + day2_beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=timedelta(hours=0), event_value=val, source=setup_sources["Seita"], sensor=setup_markets["epex_da"].corresponding_sensor, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(day2_beliefs) @pytest.fixture(scope="module") diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index ace75c637..9c8e7282d 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -15,9 +15,8 @@ from flexmeasures.data.services.resources import find_closest_sensor from flexmeasures.data.config import db from flexmeasures.data.transactional import task_with_status_report -from flexmeasures.data.models.weather import Weather from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.time_series import Sensor +from flexmeasures.data.models.time_series import Sensor, TimedBelief FILE_PATH_LOCATION = "/../raw_data/weather-forecasts" DATA_SOURCE_NAME = "OpenWeatherMap" @@ 
-416,8 +415,7 @@ def save_forecasts_in_db( ) db_forecasts.append( - Weather( - use_legacy_kwargs=False, + TimedBelief( event_start=fc_datetime, belief_horizon=fc_horizon, event_value=fc_value, diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index 6e9e9e176..ad05358a6 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -8,7 +8,6 @@ from timetomodel.forecasting import make_rolling_forecasts import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.forecasting import lookup_model_specs_configurator from flexmeasures.data.models.forecasting.exceptions import InvalidHorizonException @@ -17,7 +16,7 @@ get_query_window, check_data_availability, ) -from flexmeasures.data.utils import get_data_source +from flexmeasures.data.utils import get_data_source, save_to_db from flexmeasures.utils.time_utils import ( as_server_time, server_now, diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index d80150a49..1ea875994 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -10,12 +10,11 @@ from rq.job import Job import timely_beliefs as tb -from flexmeasures.api.common.utils.api_utils import save_to_db from flexmeasures.data.config import db from flexmeasures.data.models.planning.battery import schedule_battery from flexmeasures.data.models.planning.charging_station import schedule_charging_station from flexmeasures.data.models.time_series import Sensor, TimedBelief -from flexmeasures.data.utils import get_data_source +from flexmeasures.data.utils import get_data_source, save_to_db """ The life cycle of a scheduling job: diff --git a/flexmeasures/data/tests/conftest.py b/flexmeasures/data/tests/conftest.py index c60816729..aff5572bf 100644 --- a/flexmeasures/data/tests/conftest.py +++ b/flexmeasures/data/tests/conftest.py @@ -9,9 +9,10 @@ from flask_sqlalchemy import SQLAlchemy from statsmodels.api import OLS -from flexmeasures.data.models.assets import Asset, Power +from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.data_sources import DataSource -from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather +from flexmeasures.data.models.time_series import TimedBelief +from flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor from flexmeasures.data.models.forecasting import model_map from flexmeasures.data.models.forecasting.model_spec_factory import ( create_initial_model_specs, @@ -74,16 +75,17 @@ def setup_fresh_test_data( datetime(2015, 1, 1), datetime(2015, 1, 1, 23, 45), freq="15T" ) values = [random() * (1 + np.sin(x / 15)) for x in range(len(time_slots))] - for dt, val in zip(time_slots, values): - p = Power( - use_legacy_kwargs=False, + beliefs = [ + TimedBelief( event_start=as_server_time(dt), belief_horizon=parse_duration("PT0M"), event_value=val, sensor=asset.corresponding_sensor, source=data_source, ) - db.session.add(p) + for dt, val in zip(time_slots, values) + ] + db.session.add_all(beliefs) add_test_weather_sensor_and_forecasts(fresh_db) @@ -131,8 +133,7 @@ def add_test_weather_sensor_and_forecasts(db: SQLAlchemy): values = [value * 600 for value in values] for dt, val in zip(time_slots, values): db.session.add( - Weather( - use_legacy_kwargs=False, + TimedBelief( sensor=sensor.corresponding_sensor, 
event_start=as_server_time(dt), event_value=val, diff --git a/flexmeasures/data/tests/test_time_series_services.py b/flexmeasures/data/tests/test_time_series_services.py index 5125e7c48..9d9589aec 100644 --- a/flexmeasures/data/tests/test_time_series_services.py +++ b/flexmeasures/data/tests/test_time_series_services.py @@ -1,7 +1,7 @@ import pandas as pd from timely_beliefs import utils as tb_utils -from flexmeasures.api.common.utils.api_utils import save_to_db +from flexmeasures.data.utils import save_to_db from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index fe78d8a81..b5096fd61 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -1,9 +1,13 @@ -from typing import List, Optional +from typing import List, Optional, Union import click +from flask import current_app +from timely_beliefs import BeliefsDataFrame -from flexmeasures.data.config import db +from flexmeasures.data import db from flexmeasures.data.models.data_sources import DataSource +from flexmeasures.data.models.time_series import TimedBelief +from flexmeasures.data.services.time_series import drop_unchanged_beliefs def save_to_session(objects: List[db.Model], overwrite: bool = False): @@ -44,3 +48,83 @@ def get_data_source( f'Session updated with new {data_source_type} data source "{data_source.__repr__()}".' ) return data_source + + +def save_to_db( + data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], + save_changed_beliefs_only: bool = True, +) -> str: + """Save the timed beliefs to the database. + + NB Flushes the session. Best to keep transactions short. + + We make the distinction between updating beliefs and replacing beliefs. + + # Updating beliefs + + An updated belief is a belief from the same source as some already saved belief, and about the same event, + but with a later belief time. If it has a different event value, then it represents a changed belief. + Note that it is possible to explicitly record unchanged beliefs (i.e. updated beliefs with a later belief time, + but with the same event value), by setting save_changed_beliefs_only to False. + + # Replacing beliefs + + A replaced belief is a belief from the same source as some already saved belief, + and about the same event and with the same belief time, but with a different event value. + Replacing beliefs is not allowed, because messing with the history corrupts data lineage. + Corrections should instead be recorded as updated beliefs. + Servers in 'play' mode are exempt from this rule, to facilitate replaying simulations. 
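+ + For example (hypothetical beliefs from one source about one sensor): given a saved + belief about the event starting at 12:00, with belief time 11:00 and event value 5, + a belief with belief time 11:30 and event value 6 is an updated (and changed) belief, + a belief with belief time 11:30 and event value 5 is an updated but unchanged belief + (skipped unless save_changed_beliefs_only is False), and a belief with belief time + 11:00 and event value 6 is a forbidden replacement (outside play mode).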
+ + :param data: BeliefsDataFrame (or a list thereof) to be saved + :param save_changed_beliefs_only: if True, unchanged beliefs are skipped (updated beliefs are only stored if they represent changed beliefs) + if False, all updated beliefs are stored + :returns: status string, one of the following: + - 'success': all beliefs were saved + - 'success_with_unchanged_beliefs_skipped': not all beliefs represented a state change + """ + + # Convert to list + if not isinstance(data, list): + timed_values_list = [data] + else: + timed_values_list = data + + status = "success" + for timed_values in timed_values_list: + + if timed_values.empty: + # Nothing to save + continue + + len_before = len(timed_values) + if save_changed_beliefs_only: + + # Drop beliefs that haven't changed + timed_values = ( + timed_values.convert_index_from_belief_horizon_to_time() + .groupby(level=["belief_time", "source"], as_index=False) + .apply(drop_unchanged_beliefs) + ) + len_after = len(timed_values) + if len_after < len_before: + status = "success_with_unchanged_beliefs_skipped" + + # Work around bug in which groupby still introduces an index level, even though we asked it not to + if None in timed_values.index.names: + timed_values.index = timed_values.index.droplevel(None) + + if timed_values.empty: + # No state changes among the beliefs + continue + + current_app.logger.info("SAVING TO DB...") + TimedBelief.add_to_session( + session=db.session, + beliefs_data_frame=timed_values, + allow_overwrite=False + if current_app.config.get("FLEXMEASURES_MODE", "") != "play" + else True, + ) + # Flush to bring up potential unique violations (due to attempting to replace beliefs) + db.session.flush() + return status From e0af47a09c66fb7f4135324b38cc357ada336527 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 3 Jan 2022 14:40:57 +0100 Subject: [PATCH 26/46] Issue 273 add roundtrip efficiency to scheduler (#291) Rewrite our generic device scheduler to: - Deal with asymmetric efficiency losses of individual devices. - Deal with asymmetric up and down prices for deviating from previous commitments. Also allow round-trip efficiency to be communicated as a new optional field when POSTing UDI Events, with efficiency losses being assigned equally to charging and discharging. * Query TimedBelief rather than Power in api v1.3 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1.3 implementations Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in user services tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in query tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in forecasting tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in scheduling tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 tests Signed-off-by: F.N. Claessen * Simplify data deletion, like, by a lot Signed-off-by: F.N. Claessen * Count ex-ante TimedBeliefs after populating time series forecasts Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price in api v1_1 tests Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in Resource.load_sensor_data Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power/Price/Weather in api v2.0 tests Signed-off-by: F.N. Claessen * Refactor: simplify duplicate query construction Signed-off-by: F.N. Claessen * Add custom join target to get rid of SA warning Signed-off-by: F.N. 
Claessen * Filter criteria should work for both TimedBeliefs and TimedValues Signed-off-by: F.N. Claessen * Clarify docstring Signed-off-by: F.N. Claessen * Query TimedBelief rather than Power in api v1 implementations Signed-off-by: F.N. Claessen * Schedules should contain one deterministic belief per event Signed-off-by: F.N. Claessen * Fix type annotation Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen * Query TimedBelief rather than Price/Weather for analytics Signed-off-by: F.N. Claessen * Query deterministic TimedBelief rather than Price for planning queries Signed-off-by: F.N. Claessen * Forecast TimedBelief rather than Power/Price/Weather Signed-off-by: F.N. Claessen * Schedule TimedBelief rather than Power Signed-off-by: F.N. Claessen * Apparently, to initialize a TimedBelief is to save a TimedBelief, too Signed-off-by: F.N. Claessen * Create TimedBelief rather than Power/Price/Weather in data generation script Signed-off-by: F.N. Claessen * Bump timely-beliefs dependency Signed-off-by: F.N. Claessen * Fix latest state query Signed-off-by: F.N. Claessen * Revert "Apparently, to initialize a TimedBelief is to save a TimedBelief, too" This reverts commit fb58ec7459dd69c571bee27cdce61e67c14617ae. * Prevent saving TimedBelief to session upon updating Sensor or Source Signed-off-by: F.N. Claessen * Create only TimedBeliefs in conftests Signed-off-by: F.N. Claessen * Use session.add_all calls instead of session.bulk_save_objects or individual session.add calls Signed-off-by: F.N. Claessen * API directly creates TimedBeliefs Signed-off-by: F.N. Claessen * CLI uses TimedBeliefs only Signed-off-by: F.N. Claessen * Data scripts use TimedBeliefs only Signed-off-by: F.N. Claessen * One more conftest switched to creating TimedBeliefs instead of Weather objects Signed-off-by: F.N. Claessen * Expand docstring note on forbidden replacements Signed-off-by: F.N. Claessen * Clarify docstring note on saving changed beliefs only Signed-off-by: F.N. Claessen * Remove redundant flush Signed-off-by: F.N. Claessen * Catch forbidden belief replacements with more specific exception Signed-off-by: F.N. Claessen * Rename variable Signed-off-by: F.N. Claessen * One transaction per request Signed-off-by: F.N. Claessen * Only enqueue forecasting jobs upon successfully saving new data Signed-off-by: F.N. Claessen * Flush instead of commit Signed-off-by: F.N. Claessen * Expand test for forbidden data replacement Signed-off-by: F.N. Claessen * Simplify play mode exemption for replacing beliefs Signed-off-by: F.N. Claessen * Add note about potential session rollback Signed-off-by: F.N. Claessen * Typo Signed-off-by: F.N. Claessen * Move UniqueViolation catching logic to error handler Signed-off-by: F.N. Claessen * flake8 Signed-off-by: F.N. Claessen * Rewrite solver to deal with asymmetry in up and down commitment prices Signed-off-by: F.N. Claessen * Add optional roundtrip_efficiency field to UDI events, and use it to scale prices Signed-off-by: F.N. Claessen * Add test cases for various round-trip efficiencies Signed-off-by: F.N. Claessen * Add changelog entries Signed-off-by: F.N. Claessen * Add documentation for the new API field Signed-off-by: F.N. Claessen * Grammar corrections Signed-off-by: F.N. Claessen * Fix return value for empty EMS Signed-off-by: F.N. Claessen * Allow efficiencies per device for multi-device EMS, by stopping the application of round-trip efficiency as price scalars and modeling device flows in more detail Signed-off-by: F.N.
Claessen * Relax tests using some tolerance Signed-off-by: F.N. Claessen * Fix mistake Signed-off-by: F.N. Claessen * Add test docstring Signed-off-by: F.N. Claessen * Check round-trip efficiency for acceptable range Signed-off-by: F.N. Claessen * Expand docstring Signed-off-by: F.N. Claessen --- documentation/api/change_log.rst | 8 ++ documentation/changelog.rst | 1 + flexmeasures/api/v1_3/implementations.py | 4 + flexmeasures/api/v1_3/routes.py | 4 +- flexmeasures/data/models/planning/battery.py | 14 +++ .../data/models/planning/charging_station.py | 14 ++- flexmeasures/data/models/planning/solver.py | 116 +++++++++++++++--- .../data/models/planning/tests/test_solver.py | 70 ++++++++--- flexmeasures/data/services/scheduling.py | 19 ++- 9 files changed, 216 insertions(+), 34 deletions(-) diff --git a/documentation/api/change_log.rst b/documentation/api/change_log.rst index 4a76a5a54..d6f119230 100644 --- a/documentation/api/change_log.rst +++ b/documentation/api/change_log.rst @@ -39,6 +39,14 @@ v2.0-0 | 2020-11-14 - REST endpoints for managing assets: `/assets/` (GET, POST) and `/asset/` (GET, PATCH, DELETE). +v1.3-11 | 2022-01-01 +"""""""""""""""""""" + +*Affects all versions since v1.3*. + +- Extended the *postUdiEvent* endpoint with an optional "roundtrip_efficiency" field, for use in scheduling. + + v1.3-10 | 2021-11-08 """""""""""""""""""" diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 4015730cc..b79348562 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -11,6 +11,7 @@ v0.8.0 | November XX, 2021 New features ----------- * Charts with sensor data can be requested in one of the supported [`vega-lite themes `_] (incl. a dark theme) [see `PR #221 `_] +* Schedulers take into account round-trip efficiency if set [see `PR #291 `_] Bugfixes ----------- diff --git a/flexmeasures/api/v1_3/implementations.py b/flexmeasures/api/v1_3/implementations.py index 53fe333cf..d478ff279 100644 --- a/flexmeasures/api/v1_3/implementations.py +++ b/flexmeasures/api/v1_3/implementations.py @@ -280,6 +280,9 @@ def post_udi_event_response(unit): if unit == "kWh": value = value / 1000.0 + # get optional efficiency + roundtrip_efficiency = form.get("roundtrip_efficiency", None) + # set soc targets start_of_schedule = datetime end_of_schedule = datetime + current_app.config.get("FLEXMEASURES_PLANNING_HORIZON") @@ -349,6 +352,7 @@ def post_udi_event_response(unit): belief_time=datetime, soc_at_start=value, soc_targets=soc_targets, + roundtrip_efficiency=roundtrip_efficiency, udi_event_ea=form.get("event"), enqueue=True, ) diff --git a/flexmeasures/api/v1_3/routes.py b/flexmeasures/api/v1_3/routes.py index 0e4c5addd..4154c7693 100644 --- a/flexmeasures/api/v1_3/routes.py +++ b/flexmeasures/api/v1_3/routes.py @@ -104,6 +104,7 @@ def post_udi_event(): This "PostUdiEventRequest" message posts a state of charge (soc) of 12.1 kWh at 10.00am, and a target state of charge of 25 kWh at 4.00pm, as UDI event 204 of device 10 of owner 7. + Roundtrip efficiency for use in scheduling is set to 98%. .. 
code-block:: json @@ -118,7 +119,8 @@ def post_udi_event(): "value": 25, "datetime": "2015-06-02T16:00:00+00:00" } - ] + ], + "roundtrip_efficiency": 0.98 } **Example response** diff --git a/flexmeasures/data/models/planning/battery.py b/flexmeasures/data/models/planning/battery.py index 98d95ba0c..41798122f 100644 --- a/flexmeasures/data/models/planning/battery.py +++ b/flexmeasures/data/models/planning/battery.py @@ -21,6 +21,7 @@ def schedule_battery( resolution: timedelta, soc_at_start: float, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, prefer_charging_sooner: bool = True, ) -> Union[pd.Series, None]: """Schedule a battery asset based directly on the latest beliefs regarding market prices within the specified time @@ -37,6 +38,13 @@ def schedule_battery( ], ) + # Check for round-trip efficiency + if roundtrip_efficiency is None: + # Get default from sensor, or use 100% otherwise + roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + if roundtrip_efficiency <= 0 or roundtrip_efficiency > 1: + raise ValueError("roundtrip_efficiency expected within the interval (0, 1]") + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, allow_trimmed_query_window=True @@ -69,6 +77,8 @@ def schedule_battery( "derivative equals", "derivative max", "derivative min", + "derivative down efficiency", + "derivative up efficiency", ] device_constraints = [initialize_df(columns, start, end, resolution)] if soc_targets is not None: @@ -90,6 +100,10 @@ def schedule_battery( ) device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") + # Apply round-trip efficiency evenly to charging and discharging + device_constraints[0]["derivative down efficiency"] = roundtrip_efficiency ** 0.5 + device_constraints[0]["derivative up efficiency"] = roundtrip_efficiency ** 0.5 + # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] ems_constraints = initialize_df(columns, start, end, resolution) diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 279fd9b71..93de81ac8 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import Optional, Union from datetime import datetime, timedelta from pandas import Series, Timestamp @@ -21,6 +21,7 @@ def schedule_charging_station( resolution: timedelta, soc_at_start: float, soc_targets: Series, + roundtrip_efficiency: Optional[float] = None, prefer_charging_sooner: bool = True, ) -> Union[Series, None]: """Schedule a charging station asset based directly on the latest beliefs regarding market prices within the specified time @@ -32,6 +33,13 @@ def schedule_charging_station( # Check for required Sensor attributes sensor.check_required_attributes([("capacity_in_mw", (float, int))]) + # Check for round-trip efficiency + if roundtrip_efficiency is None: + # Get default from sensor, or use 100% otherwise + roundtrip_efficiency = sensor.get_attribute("roundtrip_efficiency", 1) + if roundtrip_efficiency <= 0 or roundtrip_efficiency > 1: + raise ValueError("roundtrip_efficiency expected within the interval (0, 1]") + # Check for known prices or price forecasts, trimming planning window accordingly prices, (start, end) = get_prices( sensor, (start, end), resolution, 
allow_trimmed_query_window=True @@ -95,6 +103,10 @@ def schedule_charging_station( else: device_constraints[0]["derivative max"] = sensor.get_attribute("capacity_in_mw") + # Apply round-trip efficiency evenly to charging and discharging + device_constraints[0]["derivative down efficiency"] = roundtrip_efficiency ** 0.5 + device_constraints[0]["derivative up efficiency"] = roundtrip_efficiency ** 0.5 + # Set up EMS constraints (no additional constraints) columns = ["derivative max", "derivative min"] ems_constraints = initialize_df(columns, start, end, resolution) diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index 78341b5e8..a473553f3 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -10,6 +10,8 @@ RangeSet, Param, Reals, + NonNegativeReals, + NonPositiveReals, Constraint, Objective, minimize, @@ -30,8 +32,11 @@ def device_scheduler( # noqa C901 commitment_downwards_deviation_price: Union[List[pd.Series], List[float]], commitment_upwards_deviation_price: Union[List[pd.Series], List[float]], ) -> Tuple[List[pd.Series], float, SolverResults]: - """Schedule devices given constraints on a device and EMS level, and given a list of commitments by the EMS. - The commitments are assumed to be with regards to the flow of energy to the device (positive for consumption, + """This generic device scheduler is able to handle an EMS with multiple devices, + with various types of constraints on the EMS level and on the device level, + and with multiple market commitments on the EMS level. + A typical example is a house with many devices. + The commitments are assumed to be with regard to the flow of energy to the device (positive for consumption, negative for production). The solver minimises the costs of deviating from the commitments. Device constraints are on a device level. Handled constraints (listed by column name): @@ -41,6 +46,8 @@ def device_scheduler( # noqa C901 derivative max: maximum flow (e.g. in MW or boxes/h) derivative min: minimum flow derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max) + derivative down efficiency: ratio of downwards flows (flow into EMS : flow out of device) + derivative up efficiency: ratio of upwards flows (flow into device : flow out of EMS) EMS constraints are on an EMS level. Handled constraints (listed by column name): derivative max: maximum flow derivative min: minimum flow @@ -54,13 +61,13 @@ def device_scheduler( # noqa C901 All Series and DataFrames should have the same resolution. - For now we pass in the various constraints and prices as separate variables, from which we make a MultiIndex + For now, we pass in the various constraints and prices as separate variables, from which we make a MultiIndex DataFrame. Later we could pass in a MultiIndex DataFrame directly. 
""" # If the EMS has no devices, don't bother if len(device_constraints) == 0: - return [], 0 + return [], 0, SolverResults() # Check if commitments have the same time window and resolution as the constraints start = device_constraints[0].index.to_pydatetime()[0] @@ -164,6 +171,18 @@ def ems_derivative_min_select(m, j): else: return v + def device_derivative_down_efficiency(m, d, j): + try: + return device_constraints[d]["derivative down efficiency"].iloc[j] + except KeyError: + return 1 + + def device_derivative_up_efficiency(m, d, j): + try: + return device_constraints[d]["derivative up efficiency"].iloc[j] + except KeyError: + return 1 + model.up_price = Param(model.c, model.j, initialize=price_up_select) model.down_price = Param(model.c, model.j, initialize=price_down_select) model.commitment_quantity = Param( @@ -179,45 +198,107 @@ def ems_derivative_min_select(m, j): ) model.ems_derivative_max = Param(model.j, initialize=ems_derivative_max_select) model.ems_derivative_min = Param(model.j, initialize=ems_derivative_min_select) + model.device_derivative_down_efficiency = Param( + model.d, model.j, initialize=device_derivative_down_efficiency + ) + model.device_derivative_up_efficiency = Param( + model.d, model.j, initialize=device_derivative_up_efficiency + ) # Add variables - model.power = Var(model.d, model.j, domain=Reals, initialize=0) + model.ems_power = Var(model.d, model.j, domain=Reals, initialize=0) + model.device_power_down = Var( + model.d, model.j, domain=NonPositiveReals, initialize=0 + ) + model.device_power_up = Var(model.d, model.j, domain=NonNegativeReals, initialize=0) + model.commitment_downwards_deviation = Var( + model.c, model.j, domain=NonPositiveReals, initialize=0 + ) + model.commitment_upwards_deviation = Var( + model.c, model.j, domain=NonNegativeReals, initialize=0 + ) # Add constraints as a tuple of (lower bound, value, upper bound) def device_bounds(m, d, j): return ( m.device_min[d, j], - sum(m.power[d, k] for k in range(0, j + 1)), + sum( + m.device_power_down[d, k] + m.device_power_up[d, k] + for k in range(0, j + 1) + ), m.device_max[d, j], ) def device_derivative_bounds(m, d, j): return ( m.device_derivative_min[d, j], - m.power[d, j], + m.device_power_down[d, j] + m.device_power_up[d, j], + m.device_derivative_max[d, j], + ) + + def device_down_derivative_bounds(m, d, j): + return ( + m.device_derivative_min[d, j], + m.device_power_down[d, j], + 0, + ) + + def device_up_derivative_bounds(m, d, j): + return ( + 0, + m.device_power_up[d, j], m.device_derivative_max[d, j], ) def ems_derivative_bounds(m, j): - return m.ems_derivative_min[j], sum(m.power[:, j]), m.ems_derivative_max[j] + return m.ems_derivative_min[j], sum(m.ems_power[:, j]), m.ems_derivative_max[j] + + def ems_flow_commitment_equalities(m, j): + """Couple EMS flows (sum over devices) to commitments.""" + return ( + 0, + sum(m.commitment_quantity[:, j]) + + sum(m.commitment_downwards_deviation[:, j]) + + sum(m.commitment_upwards_deviation[:, j]) + - sum(m.ems_power[:, j]), + 0, + ) + + def device_derivative_equalities(m, d, j): + """Couple device flows to EMS flows per device, applying efficiencies.""" + return ( + 0, + m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] + + m.device_power_down[d, j] * m.device_derivative_down_efficiency[d, j] + - m.ems_power[d, j], + 0, + ) model.device_energy_bounds = Constraint(model.d, model.j, rule=device_bounds) model.device_power_bounds = Constraint( model.d, model.j, rule=device_derivative_bounds ) + model.device_power_down_bounds = 
Constraint( + model.d, model.j, rule=device_down_derivative_bounds + ) + model.device_power_up_bounds = Constraint( + model.d, model.j, rule=device_up_derivative_bounds + ) model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds) + model.ems_power_commitment_equalities = Constraint( + model.j, rule=ems_flow_commitment_equalities + ) + model.device_power_equalities = Constraint( + model.d, model.j, rule=device_derivative_equalities + ) # Add objective def cost_function(m): costs = 0 for c in m.c: for j in m.j: - ems_power_in_j = sum(m.power[d, j] for d in m.d) - ems_power_deviation = ems_power_in_j - m.commitment_quantity[c, j] - if value(ems_power_deviation) >= 0: - costs += ems_power_deviation * m.up_price[c, j] - else: - costs += ems_power_deviation * m.down_price[c, j] + costs += m.commitment_downwards_deviation[c, j] * m.down_price[c, j] + costs += m.commitment_upwards_deviation[c, j] * m.up_price[c, j] return costs model.costs = Objective(rule=cost_function, sense=minimize) @@ -230,7 +311,10 @@ def cost_function(m): planned_costs = value(model.costs) planned_power_per_device = [] for d in model.d: - planned_device_power = [model.power[d, j].value for j in model.j] + planned_device_power = [ + model.device_power_down[d, j].value + model.device_power_up[d, j].value + for j in model.j + ] planned_power_per_device.append( pd.Series( index=pd.date_range( @@ -243,5 +327,5 @@ def cost_function(m): # model.pprint() # print(results.solver.termination_condition) # print(planned_costs) - # input() + # model.display() return planned_power_per_device, planned_costs, results diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 10562f14d..a979d743d 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -11,6 +11,9 @@ from flexmeasures.utils.time_utils import as_server_time +TOLERANCE = 0.00001 + + def test_battery_solver_day_1(add_battery_assets): epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() @@ -26,14 +29,34 @@ def test_battery_solver_day_1(add_battery_assets): print(soc_schedule) # Check if constraints were met - assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 + assert ( + min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 - TOLERANCE + ) assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") for soc in soc_schedule.values: assert soc >= battery.get_attribute("min_soc_in_mwh") assert soc <= battery.get_attribute("max_soc_in_mwh") -def test_battery_solver_day_2(add_battery_assets): +@pytest.mark.parametrize( + "roundtrip_efficiency", + [ + 1, + 0.99, + 0.01, + ], +) +def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): + """Check battery scheduling results for day 2, which is set up with + 8 expensive, then 8 cheap, then again 8 expensive hours. 
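+ Note that efficiency losses are applied evenly to charging and discharging + (illustrative numbers: a round-trip efficiency of 0.81 becomes sqrt(0.81) = 0.9 + per direction), so cycling the battery only pays off if the price spread + covers the round-trip loss.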
+ If efficiency losses aren't too bad, we expect the scheduler to: + - completely discharge within the first 8 hours + - completely charge within the next 8 hours + - completely discharge within the last 8 hours + If efficiency losses are bad, the price difference is not worth cycling the battery, + and so we expect the scheduler to only: + - completely discharge within the last 8 hours + """ epex_da = Sensor.query.filter(Sensor.name == "epex_da").one_or_none() battery = Sensor.query.filter(Sensor.name == "Test battery").one_or_none() assert Sensor.query.get(battery.get_attribute("market_id")) == epex_da @@ -41,7 +64,14 @@ def test_battery_solver_day_2(add_battery_assets): end = as_server_time(datetime(2015, 1, 3)) resolution = timedelta(minutes=15) soc_at_start = battery.get_attribute("soc_in_mwh") - schedule = schedule_battery(battery, start, end, resolution, soc_at_start) + schedule = schedule_battery( + battery, + start, + end, + resolution, + soc_at_start, + roundtrip_efficiency=roundtrip_efficiency, + ) soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6) with pd.option_context("display.max_rows", None, "display.max_columns", 3): @@ -49,7 +79,7 @@ def test_battery_solver_day_2(add_battery_assets): # Check if constraints were met assert min(schedule.values) >= battery.get_attribute("capacity_in_mw") * -1 - assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") + assert max(schedule.values) <= battery.get_attribute("capacity_in_mw") + TOLERANCE for soc in soc_schedule.values: assert soc >= battery.get_attribute("min_soc_in_mwh") assert soc <= battery.get_attribute("max_soc_in_mwh") @@ -58,12 +88,23 @@ def test_battery_solver_day_2(add_battery_assets): assert soc_schedule.iloc[-1] == battery.get_attribute( "min_soc_in_mwh" ) # Battery sold out at the end of its planning horizon - assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( - "min_soc_in_mwh" - ) # Sell what you begin with - assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( - "max_soc_in_mwh" - ) # Buy what you can to sell later + + # As long as the roundtrip efficiency isn't too bad (I haven't computed the actual switch point) + if roundtrip_efficiency > 0.9: + assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( + "min_soc_in_mwh" + ) # Sell what you begin with + assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( + "max_soc_in_mwh" + ) # Buy what you can to sell later + else: + # If the roundtrip efficiency is poor, best to stand idle + assert soc_schedule.loc[start + timedelta(hours=8)] == battery.get_attribute( + "soc_in_mwh" + ) + assert soc_schedule.loc[start + timedelta(hours=16)] == battery.get_attribute( + "soc_in_mwh" + ) @pytest.mark.parametrize( @@ -109,12 +150,13 @@ def test_charging_station_solver_day_2(target_soc, charging_station_name): min(consumption_schedule.values) >= charging_station.get_attribute("capacity_in_mw") * -1 ) - assert max(consumption_schedule.values) <= charging_station.get_attribute( - "capacity_in_mw" + assert ( + max(consumption_schedule.values) + <= charging_station.get_attribute("capacity_in_mw") + TOLERANCE ) print(consumption_schedule.head(12)) print(soc_schedule.head(12)) - assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < 0.00001 + assert abs(soc_schedule.loc[target_soc_datetime] - target_soc) < TOLERANCE @pytest.mark.parametrize( @@ -171,5 +213,5 @@ def test_fallback_to_unsolvable_problem(target_soc, 
charging_station_name): print(soc_schedule.head(12)) assert ( abs(abs(soc_schedule.loc[target_soc_datetime] - target_soc) - expected_gap) - < 0.00001 + < TOLERANCE ) diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 1ea875994..13af125bd 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -36,6 +36,7 @@ def create_scheduling_job( resolution: timedelta = DEFAULT_RESOLUTION, soc_at_start: Optional[float] = None, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, udi_event_ea: Optional[str] = None, enqueue: bool = True, ) -> Job: @@ -61,6 +62,7 @@ def create_scheduling_job( resolution=resolution, soc_at_start=soc_at_start, soc_targets=soc_targets, + roundtrip_efficiency=roundtrip_efficiency, ), id=udi_event_ea, connection=current_app.queues["scheduling"].connection, @@ -88,6 +90,7 @@ def make_schedule( resolution: timedelta, soc_at_start: Optional[float] = None, soc_targets: Optional[pd.Series] = None, + roundtrip_efficiency: Optional[float] = None, ) -> bool: """Preferably, a starting soc is given. Otherwise, we try to retrieve the current state of charge from the asset (if that is the valid one at the start). @@ -122,14 +125,26 @@ def make_schedule( if sensor.generic_asset.generic_asset_type.name == "battery": consumption_schedule = schedule_battery( - sensor, start, end, resolution, soc_at_start, soc_targets + sensor, + start, + end, + resolution, + soc_at_start, + soc_targets, + roundtrip_efficiency, ) elif sensor.generic_asset.generic_asset_type.name in ( "one-way_evse", "two-way_evse", ): consumption_schedule = schedule_charging_station( - sensor, start, end, resolution, soc_at_start, soc_targets + sensor, + start, + end, + resolution, + soc_at_start, + soc_targets, + roundtrip_efficiency, ) else: raise ValueError( From 1bb28194a10e24f1739b7287dee9b9bdb9ffd106 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Wed, 5 Jan 2022 16:59:22 +0100 Subject: [PATCH 27/46] Issue 247 generic asset crud (#290) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add a new method to specify an auth context (by callable) and discuss steps forward for use cases not covered yet by auth policy * add ACL for GenericAsset * added /dev/generic_assets API, CRUD uses it (passing manual testing) * fix asset icon in asset view * remove verbs from /api/dev/generic_assets API routes (HTTP method already defines what happens) * make CRUD UI tests (mocking the asset API) work * auth policy: change 'create' to 'create-children' permission (removing the need for issue#200); fix modelling problem if several distinct principal formulations exist for one permission * separate fresh tests from others; only generate copies of generic assets when we don't already have them (distinguish by name); re-add old asset tests * add missing mock in another ui test * add unique constraints: on GenericAssetType.name & on GenericAssets for name+account_id * improve explanation of our authorization policy implementation * update changelog * improve changelog, also mention temporary API location * comments next to test cases Signed-off-by: Nicolas Höning * create_user function hashes the password Signed-off-by: Nicolas Höning * more informative validation error Signed-off-by: Nicolas Höning * allow creation of public Assets in UI Signed-off-by: Nicolas Höning * Fix display of owning account Signed-off-by: Nicolas Höning * better
changelog docs Signed-off-by: Nicolas Höning * list sensors on asset page Signed-off-by: Nicolas Höning * do not fail getting latest state if capacity_in_mw is not in attributes Signed-off-by: Nicolas Höning * better default for capacity_in_mw Signed-off-by: Nicolas Höning * use id in the dict, for sensors lookup Signed-off-by: Nicolas Höning * Suggested fixes to pr 290 (#299) Remove the entity addresses from the generic asset listing, and list the number of sensors instead. * grammar Signed-off-by: F.N. Claessen * Update generic asset listing, replacing entity addresses with number of sensors Signed-off-by: F.N. Claessen * Avoid crashing on formatting None values Signed-off-by: F.N. Claessen * Fix wrong variable name Signed-off-by: F.N. Claessen * Fix incorrect type annotation Signed-off-by: F.N. Claessen * Partly revert cefe3202ec2c54fa970d90c46162f48193d2746b Signed-off-by: F.N. Claessen * small improvement on locating ea addresses Signed-off-by: Nicolas Höning * Describe current state of transition Signed-off-by: Nicolas Höning * deprecation notice in old-style asset API documentation Signed-off-by: Nicolas Höning * Fix type annotation Signed-off-by: F.N. Claessen * Update documentation Signed-off-by: F.N. Claessen * Update v2.0 connection entity addresses in documentation Signed-off-by: F.N. Claessen * Correct quickrefs in v2.0 documentation Signed-off-by: F.N. Claessen * Update other v2.0 sensor entity addresses in documentation Signed-off-by: F.N. Claessen * Fix entity address dates corresponding to older previous domain name Signed-off-by: F.N. Claessen * Adapt more occurrences of older dates in entity addresses, and switch to fm1 scheme. Signed-off-by: F.N. Claessen Co-authored-by: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Co-authored-by: F.N. 
Claessen --- documentation/api/change_log.rst | 15 +- documentation/api/introduction.rst | 68 +++-- documentation/changelog.rst | 3 +- .../dev/note-on-datamodel-transition.rst | 10 + documentation/tut/forecasting_scheduling.rst | 8 +- documentation/tut/posting_data.rst | 31 +- flexmeasures/api/common/schemas/users.py | 18 +- flexmeasures/api/dev/__init__.py | 6 +- flexmeasures/api/dev/assets.py | 86 ++++++ flexmeasures/api/dev/tests/conftest.py | 10 +- flexmeasures/api/dev/tests/test_assets_api.py | 225 +++++++++++++++ .../api/dev/tests/test_assets_api_fresh_db.py | 65 +++++ .../dev/tests/test_sensor_data_fresh_db.py | 1 - flexmeasures/api/dev/tests/utils.py | 11 + flexmeasures/api/tests/utils.py | 29 +- flexmeasures/api/v1/tests/conftest.py | 14 +- flexmeasures/api/v1_1/routes.py | 12 +- flexmeasures/api/v1_2/routes.py | 6 +- flexmeasures/api/v1_3/routes.py | 8 +- .../api/v2_0/implementations/assets.py | 5 + .../api/v2_0/implementations/users.py | 4 +- flexmeasures/api/v2_0/routes.py | 44 +-- flexmeasures/auth/decorators.py | 33 ++- flexmeasures/auth/policy.py | 96 ++++--- .../auth/tests/test_principal_matching.py | 10 + flexmeasures/cli/data_add.py | 3 +- flexmeasures/conftest.py | 90 ++++-- .../a918360f7d63_add_unique_contraints_on_.py | 37 +++ flexmeasures/data/models/assets.py | 12 +- flexmeasures/data/models/generic_assets.py | 28 +- flexmeasures/data/models/time_series.py | 12 +- flexmeasures/data/models/user.py | 20 +- flexmeasures/data/queries/generic_assets.py | 10 +- flexmeasures/data/schemas/assets.py | 2 + flexmeasures/data/schemas/generic_assets.py | 19 +- flexmeasures/data/services/resources.py | 2 +- flexmeasures/data/services/users.py | 15 +- flexmeasures/data/tests/test_user_services.py | 16 +- flexmeasures/ui/__init__.py | 3 +- flexmeasures/ui/charts/latest_state.py | 2 +- flexmeasures/ui/crud/assets.py | 272 ++++++++++-------- .../ui/templates/admin/logged_in_user.html | 12 +- flexmeasures/ui/templates/crud/asset.html | 151 +++++----- flexmeasures/ui/templates/crud/asset_new.html | 90 +----- flexmeasures/ui/templates/crud/assets.html | 28 +- flexmeasures/ui/templates/crud/user.html | 4 +- flexmeasures/ui/templates/views/sensors.html | 4 +- flexmeasures/ui/tests/conftest.py | 8 +- flexmeasures/ui/tests/test_asset_crud.py | 36 +-- flexmeasures/ui/tests/test_views.py | 15 +- flexmeasures/ui/tests/utils.py | 18 +- flexmeasures/ui/utils/view_utils.py | 11 +- 52 files changed, 1150 insertions(+), 588 deletions(-) create mode 100644 flexmeasures/api/dev/assets.py create mode 100644 flexmeasures/api/dev/tests/test_assets_api.py create mode 100644 flexmeasures/api/dev/tests/test_assets_api_fresh_db.py create mode 100644 flexmeasures/data/migrations/versions/a918360f7d63_add_unique_contraints_on_.py diff --git a/documentation/api/change_log.rst b/documentation/api/change_log.rst index d6f119230..b0caa9732 100644 --- a/documentation/api/change_log.rst +++ b/documentation/api/change_log.rst @@ -6,6 +6,17 @@ API change log .. note:: The FlexMeasures API follows its own versioning scheme. This is also reflected in the URL, allowing developers to upgrade at their own pace. +v2.0-4 | 2022-01-04 +""""""""""""""""""" + +- Updated entity addresses in documentation, according to the fm1 scheme. +- Changed the Introduction section: + + - Rewrote the subsection on entity addresses to refer users to where they can find the entity addresses of their sensors. + - Rewrote the subsection on sensor identification (formerly known as asset identification) to place the fm1 scheme front and center. 
+ +- Fixed the categorisation of the *postMeterData*, *postPrognosis*, *postPriceData* and *postWeatherData* endpoints from the User category to the Data category. + v2.0-3 | 2021-06-07 """"""""""""""""""" @@ -152,14 +163,14 @@ v1.2-1 | 2018-09-24 { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203:soc", } rather than the erroneously double-keyed: { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203", "type": "soc" } diff --git a/documentation/api/introduction.rst b/documentation/api/introduction.rst index 94b21ec1a..be573f97a 100644 --- a/documentation/api/introduction.rst +++ b/documentation/api/introduction.rst @@ -141,7 +141,7 @@ We distinguish the following roles with different access rights to the individua - admin - Aggregator - Supplier: an energy retailer (see :ref:`supplier`) -- Prosumer: an asset owner (see :ref:`prosumer`) +- Prosumer: owner of a grid connection (see :ref:`prosumer`) - ESCo: an energy service company (see :ref:`esco`) - MDC: a meter data company (see :ref:`mdc`) - DSO: a distribution system operator (see :ref:`dso`) @@ -182,7 +182,7 @@ The API, however, does not distinguish between singular and plural key notation. Connections and entity addresses ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Connections are end points of the grid at which an asset is located. +A connection represents an end point of the grid, at which an electricity sensor (power meter) is located. Connections should be identified with an entity address following the EA1 addressing scheme prescribed by USEF[1], which is mostly taken from IETF RFC 3720 [2]: @@ -199,17 +199,23 @@ Here is a full example for a FlexMeasures connection address: .. code-block:: json { - "connection": "ea1.2021-02.io.flexmeasures.company:fm0.30:73" + "connection": "ea1.2021-02.io.flexmeasures.company:fm1.73" } -where FlexMeasures runs at `company.flexmeasures.io` (which the current domain owner started using in February 2021), and the locally unique string is of scheme `fm0` (see below) and the asset ID is 73. The asset's owner ID is 30, but this part is optional. +where FlexMeasures runs at `company.flexmeasures.io` (which the current domain owner started using in February 2021), and the locally unique string uses the `fm1` scheme (see below) to identify sensor ID 73. -Both the owner ID and the asset ID, as well as the full entity address can be obtained on the asset's listing: +Assets are listed at: .. code-block:: html https://company.flexmeasures.io/assets +The full entity addresses of all of the asset's sensors can be obtained on the asset's page, e.g. for asset 81: + +.. code-block:: html + + https://company.flexmeasures.io/assets/81 + Entity address structure """""""""""""""""""""""""" @@ -231,41 +237,31 @@ Some deeper explanations about an entity address: [3] https://tools.ietf.org/html/rfc3721 -Types of asset identifications used in FlexMeasures +Types of sensor identification used in FlexMeasures """""""""""""""""""""""""""""""""""""""""""""""""""" -FlexMeasures expects the locally unique string string to contain information in -a certain structure. We distinguish type ``fm0`` and type ``fm1`` FlexMeasures entity addresses. - -The ``fm0`` scheme is the original scheme. It identifies connected assets, weather stations, markets and UDI events in different ways. 
+FlexMeasures expects the locally unique string to contain information in a certain structure. +We distinguish type ``fm0`` and type ``fm1`` FlexMeasures entity addresses. -Examples for the fm0 scheme: +The ``fm1`` scheme is the latest version. +It uses the fact that all FlexMeasures sensors have unique IDs. -- connection = ea1.2021-01.localhost:fm0.40:30 -- connection = ea1.2021-01.io.flexmeasures:fm0.: -- weather_sensor = ea1.2021-01.io.flexmeasures:fm0.temperature:52:73.0 -- weather_sensor = ea1.2021-01.io.flexmeasures:fm0.:: -- market = ea1.2021-01.io.flexmeasures:fm0.epex_da -- market = ea1.2021-01.io.flexmeasures:fm0. -- event = ea1.2021-01.io.flexmeasures:fm0.40:30:302:soc -- event = ea1.2021-01.io.flexmeasures:fm0.::: +.. code-block:: -This scheme is explicit but also a little cumbersome to use, as one needs to look up the type or even owner (for assets), and weather sensors are identified by coordinates. -For the fm0 scheme, the 'fm0.' part is optional, for backwards compatibility. + ea1.2021-01.io.flexmeasures:fm1.42 + ea1.2021-01.io.flexmeasures:fm1. +.. todo:: UDI events are not yet modelled in the fm1 scheme -The ``fm1`` scheme is the latest version, currently under development. It works with the database structure -we are developing in the background, where all connected sensors have unique IDs. This makes it more straightforward (the scheme works the same way for all types of sensors), if less explicit. +The ``fm0`` scheme is the original scheme. +It identified different types of sensors (such as connections, weather sensors and markets) in different ways. +The ``fm0`` scheme has been deprecated for the most part and is no longer supported officially. +Only UDI events still need to be sent using the fm0 scheme. -Examples for the fm1 scheme: +.. code-block:: -- sensor = ea1.2021-01.io.flexmeasures:fm1.42 -- sensor = ea1.2021-01.io.flexmeasures:fm1. -- connection = ea1.2021-01.io.flexmeasures:fm1. -- market = ea1.2021-01.io.flexmeasures:fm1. -- weather_station = ea1.2021-01.io.flexmeasures:fm1. - -.. todo:: UDI events are not yet modelled in the fm1 scheme, but will probably be ea1.2021-01.io.flexmeasures:fm1. + ea1.2021-01.io.flexmeasures:fm0.40:30:302:soc + ea1.2021-01.io.flexmeasures:fm0.::: Groups @@ -280,8 +276,8 @@ When the attributes "start", "duration" and "unit" are stated outside of "groups "groups": [ { "connections": [ - "ea1.2021-02.io.flexmeasures.company:fm0.30:71", - "ea1.2021-02.io.flexmeasures.company:fm0.30:72" + "ea1.2021-02.io.flexmeasures.company:fm1.71", + "ea1.2021-02.io.flexmeasures.company:fm1.72" ], "values": [ 306.66, 0, ] }, { - "connection": "ea1.2021-02.io.flexmeasures.company:fm0.30:73" + "connection": "ea1.2021-02.io.flexmeasures.company:fm1.73", "values": [ 306.66, 0, ] } ] @@ -315,8 +311,8 @@ In case of a single group of connections, the message may be flattened to: { "connections": [ - "ea1.2021-02.io.flexmeasures.company:fm0.30:71", - "ea1.2021-02.io.flexmeasures.company:fm0.30:72" + "ea1.2021-02.io.flexmeasures.company:fm1.71", + "ea1.2021-02.io.flexmeasures.company:fm1.72" ], "values": [ 306.66, diff --git a/documentation/changelog.rst b/documentation/changelog.rst index b79348562..543655b7b 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -6,7 +6,7 @@ v0.8.0 | November XX, 2021 =========================== ..
warning:: Upgrading to this version requires running ``flexmeasures db upgrade`` (you can create a backup first with ``flexmeasures db-ops dump``). -.. warning:: Changes to asset attributes made via the UI or API are not reflected in the new data model until `issue #247 `_ is resolved. +.. note:: v0.8.0 is doing much of the work we need to do to move to the new data model (see :ref:`note_on_datamodel_transition`). We hope to keep the migration steps for users very limited. One thing you'll notice is that we are copying over existing data to the new model (which will be kept in sync) with the `db upgrade` command (see warning above), which can take a few minutes. New features ----------- @@ -25,6 +25,7 @@ Infrastructure / Support * Add sensor method to obtain just its latest state (excl. forecasts) [see `PR #235 `_] * Migrate attributes of assets, markets and weather sensors to our new sensor model [see `PR #254 `_ and `project 9 `_] * Migrate all time series data to our new sensor data model based on the `timely beliefs `_ lib [see `PR #286 `_ and `project 9 `_] +* Support the new asset model (which describes the organisational structure, rather than sensors and data) in UI and API. Until the transition to our new data model is completed, the new API for assets is at `/api/dev/generic_assets`. [see `PR #251 `_ and `PR #290 `_] v0.7.1 | November 08, 2021 diff --git a/documentation/dev/note-on-datamodel-transition.rst b/documentation/dev/note-on-datamodel-transition.rst index 552300ffe..d50bb426e 100644 --- a/documentation/dev/note-on-datamodel-transition.rst +++ b/documentation/dev/note-on-datamodel-transition.rst @@ -58,3 +58,13 @@ Here is a brief list: - `Sensor relations and GeneralizedAssets with metadata `_: We are generalizing our database structure for organising energy data, to support all sorts of sensors and relationships between them. We do this so we can better support the diverse set of use cases for energy flexibility. - `UI views for GeneralizedAssets `_: We are updating our UI views (dashboard maps and analytics charts) according to our new database structure for organising energy data. We do this so users can customize what they want to see. - `Deprecate old database models `_: We are deprecating the Power, Price and Weather tables in favour of the TimedBelief table, and deprecating the Asset, Market and WeatherSensor tables in favour of the Sensor and GeneralizedAsset tables. We are doing this so users can move their data to the new database model. + + +The state of the transition (January 2022, v0.8.0) +--------------------------------------------------- + +Project 9 was implemented, which moved a lot of structure over, as well as actual data and some UI (dashboard, assets). We believe that was the hardest part. + +We are now close to being able to deprecate the old database models and route the API to the new model (see project 11). The old API for assets is still in place, but the new one is already working (at /api/dev/generic_assets) and is powering what is shown in the UI. + +We take care to support people on the old data model so the transition will be as smooth as possible, as we said above. One part of this is that the ``flexmeasures db upgrade`` command copies your data to the new model. Also, creating new data (e.g. old-style assets) creates new-style data (e.g. assets/sensors) automatically. However, some edge cases are not supported in this way. For instance, edited asset meta data might have to be re-entered later.
Feel free to contact us to discuss the transition if needed. \ No newline at end of file diff --git a/documentation/tut/forecasting_scheduling.rst b/documentation/tut/forecasting_scheduling.rst index 07cdc8af0..14823286d 100644 --- a/documentation/tut/forecasting_scheduling.rst +++ b/documentation/tut/forecasting_scheduling.rst @@ -89,7 +89,7 @@ As an example, consider the same UDI event as we saw earlier (in :ref:`posting_f { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:204:soc-with-targets", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:204:soc-with-targets", "value": 12.1, "datetime": "2015-06-02T10:00:00+00:00", "unit": "kWh", @@ -132,7 +132,7 @@ This example requests a prognosis for 24 hours, with a rolling horizon of 6 hour { "type": "GetPrognosisRequest", - "connection": "ea1.2018-06.io.flexmeasures.company:1:1", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.1", "start": "2015-01-01T00:00:00+00:00", "duration": "PT24H", "horizon": "PT6H", @@ -159,7 +159,7 @@ This example of a request body shows that we want to look up a control signal fo { "type": "GetDeviceMessageRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc" + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203:soc" } The following example response indicates that FlexMeasures planned ahead 45 minutes for this battery. @@ -170,7 +170,7 @@ Each value represents the average power over a 15 minute time interval. { "type": "GetDeviceMessageResponse", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203", "values": [ 2.15, 3, diff --git a/documentation/tut/posting_data.rst b/documentation/tut/posting_data.rst index 937954e36..45e3bd2c4 100644 --- a/documentation/tut/posting_data.rst +++ b/documentation/tut/posting_data.rst @@ -35,22 +35,23 @@ Weather data (both observations and forecasts) can be posted to `POST /api/v2_0 https://company.flexmeasures.io/api//postWeatherData -Weather data can be posted for the following three types of weather sensors: +Weather data can be posted for different types of sensors, such as: - "radiation" (with kW/m² as unit) - "temperature" (with °C as unit) -- "wind_speed" (with m/s as unit) +- "wind speed" (with m/s as unit) The sensor type is part of the unique entity address for each sensor, together with the sensor's latitude and longitude. -This "PostWeatherDataRequest" message posts temperature forecasts for 15-minute intervals between 3.00pm and 4.30pm for a weather sensor located at latitude 33.4843866 and longitude 126.477859. This sensor is located in Korea's timezone ― we also reflect that in the datetimes. +This "PostWeatherDataRequest" message posts temperature forecasts for 15-minute intervals between 3.00pm and 4.30pm for a weather sensor with id 602. +As this sensor is located in Korea's timezone, we also reflect that in the datetimes. The forecasts were made at noon, as the ``prior`` field indicates. .. code-block:: json { "type": "PostWeatherDataRequest", - "sensor": "ea1.2018-06.io.flexmeasures.company:temperature:33.4843866:126.477859", + "sensor": "ea1.2021-01.io.flexmeasures.company:fm1.602", "values": [ 20.04, 20.23, @@ -106,14 +107,14 @@ Price data (both observations and forecasts) can be posted to `POST /api/v2_0/p https://company.flexmeasures.io/api//postPriceData This example "PostPriceDataRequest" message posts prices for hourly intervals between midnight and midnight the next day -for the Korean Power Exchange (KPX) day-ahead auction.
+for the Korean Power Exchange (KPX) day-ahead auction, registered under sensor 16. The ``prior`` indicates that the prices were published at 3pm on December 31st 2014 (i.e. the clearing time of the KPX day-ahead market, which is at 3 PM on the previous day ― see below for a deeper explanation). .. code-block:: json { "type": "PostPriceDataRequest", - "market": "ea1.2018-06.io.flexmeasures.company:kpx_da", + "market": "ea1.2021-01.io.flexmeasures.company:fm1.16", "values": [ 52.37, 51.14, @@ -187,7 +188,7 @@ A single average power value for a 15-minute time interval for a single connecti { "type": "PostMeterDataRequest", - "connection": "ea1.2018-06.io.flexmeasures.company:1:1", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.1", "value": 220, "start": "2015-01-01T00:00:00+00:00", "duration": "PT0H15M", @@ -204,7 +205,7 @@ Multiple values (indicating a univariate timeseries) for 15-minute time interval { "type": "PostMeterDataRequest", - "connection": "ea1.2018-06.io.flexmeasures.company:1:1", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.1", "values": [ 220, 210, @@ -228,8 +229,8 @@ We recommend to use this notation for zero values only. { "type": "PostMeterDataRequest", "connections": [ - "ea1.2018-06.io.flexmeasures.company:1:1", - "ea1.2018-06.io.flexmeasures.company:1:2" + "ea1.2021-01.io.flexmeasures.company:fm1.1", + "ea1.2021-01.io.flexmeasures.company:fm1.2" ], "value": 10, "start": "2015-01-01T00:00:00+00:00", @@ -249,11 +250,11 @@ Single different values for a 15-minute time interval for two connections, poste "type": "PostMeterDataRequest", "groups": [ { - "connection": "ea1.2018-06.io.flexmeasures.company:1:1", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.1", "value": 220 }, { - "connection": "ea1.2018-06.io.flexmeasures.company:1:2", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.2", "value": 300 } ], @@ -274,7 +275,7 @@ Multiple values (indicating a univariate timeseries) for 15-minute time interval "type": "PostMeterDataRequest", "groups": [ { - "connection": "ea1.2018-06.io.flexmeasures.company:1:1", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.1", "values": [ 220, 210, @@ -282,7 +283,7 @@ Multiple values (indicating a univariate timeseries) for 15-minute time interval ] }, { - "connection": "ea1.2018-06.io.flexmeasures.company:1:2", + "connection": "ea1.2021-01.io.flexmeasures.company:fm1.2", "values": [ 300, 303, @@ -318,7 +319,7 @@ From this, FlexMeasures derives the energy flexibility this battery has in the n { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203:soc", "value": 12.1, "datetime": "2015-06-02T10:00:00+00:00", "unit": "kWh" diff --git a/flexmeasures/api/common/schemas/users.py b/flexmeasures/api/common/schemas/users.py index 44fe8ddd9..84e14c943 100644 --- a/flexmeasures/api/common/schemas/users.py +++ b/flexmeasures/api/common/schemas/users.py @@ -10,21 +10,23 @@ class AccountIdField(fields.Integer): Field that represents an account ID. It de-serializes from the account id to an account instance. 
""" - def __init__(self, *args, **kwargs): - kwargs["load_default"] = ( - lambda: current_user.account if not current_user.is_anonymous else None - ) - super().__init__(*args, **kwargs) - - def _deserialize(self, account_id: int, attr, obj, **kwargs) -> Account: + def _deserialize(self, account_id: str, attr, obj, **kwargs) -> Account: account: Account = Account.query.filter_by(id=int(account_id)).one_or_none() if account is None: - raise abort(404, f"Account {id} not found") + raise abort(404, f"Account {account_id} not found") return account def _serialize(self, account: Account, attr, data, **kwargs) -> int: return account.id + @classmethod + def load_current(cls): + """ + Use this with the load_default arg to __init__ if you want the current user's account + by default. + """ + return current_user.account if not current_user.is_anonymous else None + class UserIdField(fields.Integer): """ diff --git a/flexmeasures/api/dev/__init__.py b/flexmeasures/api/dev/__init__.py index 3e9d72301..144d78d6b 100644 --- a/flexmeasures/api/dev/__init__.py +++ b/flexmeasures/api/dev/__init__.py @@ -8,9 +8,13 @@ def register_at(app: Flask): """This can be used to register FlaskViews.""" from flexmeasures.api.dev.sensors import SensorAPI + from flexmeasures.api.dev.assets import AssetAPI from flexmeasures.api.dev.sensor_data import post_data as post_sensor_data_impl - SensorAPI.register(app, route_prefix="/api/dev") + dev_api_prefix = "/api/dev" + + SensorAPI.register(app, route_prefix=dev_api_prefix) + AssetAPI.register(app, route_prefix=dev_api_prefix) @app.route("/sensorData", methods=["POST"]) @auth_token_required diff --git a/flexmeasures/api/dev/assets.py b/flexmeasures/api/dev/assets.py new file mode 100644 index 000000000..19abf8a92 --- /dev/null +++ b/flexmeasures/api/dev/assets.py @@ -0,0 +1,86 @@ +from flask import current_app +from flask_classful import FlaskView, route +from flask_json import as_json +from webargs.flaskparser import use_kwargs, use_args + +from flexmeasures.auth.decorators import permission_required_for_context +from flexmeasures.data.models.user import Account +from flexmeasures.data.models.generic_assets import GenericAsset as AssetModel +from flexmeasures.data.schemas.generic_assets import GenericAssetSchema as AssetSchema +from flexmeasures.api.common.schemas.generic_assets import AssetIdField +from flexmeasures.api.common.schemas.users import AccountIdField +from flexmeasures.data.config import db + + +asset_schema = AssetSchema() +assets_schema = AssetSchema(many=True) + + +class AssetAPI(FlaskView): + """ + This API view exposes generic assets. + Under development until it replaces the original Asset API. 
+ """ + + route_base = "/generic_assets" + + @route("/", methods=["GET"]) + @use_kwargs( + { + "account": AccountIdField( + data_key="account_id", load_default=AccountIdField.load_current + ), + }, + location="query", + ) + @permission_required_for_context("read", arg_name="account") + @as_json + def index(self, account: Account): + """List all assets owned by a certain account.""" + return assets_schema.dump(account.generic_assets), 200 + + @route("/", methods=["POST"]) + @permission_required_for_context( + "create-children", arg_loader=AccountIdField.load_current + ) + @use_args(AssetSchema()) + def post(self, asset_data): + """Create new asset""" + asset = AssetModel(**asset_data) + db.session.add(asset) + db.session.commit() + return asset_schema.dump(asset), 201 + + @route("/", methods=["GET"]) + @use_kwargs({"asset": AssetIdField(data_key="id")}, location="path") + @permission_required_for_context("read", arg_name="asset") + @as_json + def fetch_one(self, id, asset): + """Fetch a given asset""" + return asset_schema.dump(asset), 200 + + @route("/", methods=["PATCH"]) + @use_args(AssetSchema(partial=True)) + @use_kwargs({"db_asset": AssetIdField(data_key="id")}, location="path") + @permission_required_for_context("update", arg_name="db_asset") + @as_json + def patch(self, asset_data, id, db_asset): + """Update an asset given its identifier""" + ignored_fields = ["id", "account_id"] + for k, v in [(k, v) for k, v in asset_data.items() if k not in ignored_fields]: + setattr(db_asset, k, v) + db.session.add(db_asset) + db.session.commit() + return asset_schema.dump(db_asset), 200 + + @route("/", methods=["DELETE"]) + @use_kwargs({"asset": AssetIdField(data_key="id")}, location="path") + @permission_required_for_context("delete", arg_name="asset") + @as_json + def delete(self, id, asset): + """Delete an asset given its identifier""" + asset_name = asset.name + db.session.delete(asset) + db.session.commit() + current_app.logger.info("Deleted asset '%s'." % asset_name) + return {}, 204 diff --git a/flexmeasures/api/dev/tests/conftest.py b/flexmeasures/api/dev/tests/conftest.py index 6c5544fae..623bf574c 100644 --- a/flexmeasures/api/dev/tests/conftest.py +++ b/flexmeasures/api/dev/tests/conftest.py @@ -6,17 +6,19 @@ from flexmeasures.data.models.time_series import Sensor -@pytest.fixture(scope="module", autouse=True) -def setup_api_test_data(db, setup_roles_users): +@pytest.fixture(scope="module") +def setup_api_test_data(db, setup_roles_users, setup_generic_assets): """ Set up data for API dev tests. """ - print("Setting up data for API v2.0 tests on %s" % db.engine) + print("Setting up data for API dev tests on %s" % db.engine) add_gas_sensor(db, setup_roles_users["Test Prosumer User 2"]) @pytest.fixture(scope="function") -def setup_api_fresh_test_data(fresh_db, setup_roles_users_fresh_db): +def setup_api_fresh_test_data( + fresh_db, setup_roles_users_fresh_db, setup_generic_assets_fresh_db +): """ Set up fresh data for API dev tests. 
""" diff --git a/flexmeasures/api/dev/tests/test_assets_api.py b/flexmeasures/api/dev/tests/test_assets_api.py new file mode 100644 index 000000000..d28422569 --- /dev/null +++ b/flexmeasures/api/dev/tests/test_assets_api.py @@ -0,0 +1,225 @@ +from flask import url_for +import pytest + +from flexmeasures.data.models.generic_assets import GenericAsset +from flexmeasures.data.services.users import find_user_by_email +from flexmeasures.api.tests.utils import get_auth_token, UserContext, AccountContext +from flexmeasures.api.dev.tests.utils import get_asset_post_data + + +@pytest.mark.parametrize("use_auth", [False, True]) +def test_get_assets_badauth(client, setup_api_test_data, use_auth): + """ + Attempt to get assets with wrong or missing auth. + """ + # the case without auth: authentication will fail + headers = {"content-type": "application/json"} + query = {} + if use_auth: + # in this case, we successfully authenticate, but fail authorization + headers["Authorization"] = get_auth_token( + client, "test_dummy_user_3@seita.nl", "testtest" + ) + test_prosumer = find_user_by_email("test_prosumer_user@seita.nl") + query = {"account_id": test_prosumer.account.id} + + get_assets_response = client.get( + url_for("AssetAPI:index"), query_string=query, headers=headers + ) + print("Server responded with:\n%s" % get_assets_response.json) + if use_auth: + assert get_assets_response.status_code == 403 + else: + assert get_assets_response.status_code == 401 + + +def test_get_asset_nonaccount_access(client, setup_api_test_data): + """Without being on the same account, test correct responses when accessing one asset.""" + with UserContext("test_prosumer_user@seita.nl") as prosumer1: + prosumer1_assets = prosumer1.account.generic_assets + with UserContext("test_supplier_user_4@seita.nl") as supplieruser4: + supplieruser4_assets = supplieruser4.account.generic_assets + headers = { + "content-type": "application/json", + "Authorization": get_auth_token( + client, "test_supplier_user_4@seita.nl", "testtest" + ), + } + + # okay to look at assets in own account + asset_response = client.get( + url_for("AssetAPI:fetch_one", id=supplieruser4_assets[0].id), + headers=headers, + follow_redirects=True, + ) + assert asset_response.status_code == 200 + # not okay to see assets owned by other accounts + asset_response = client.get( + url_for("AssetAPI:fetch_one", id=prosumer1_assets[0].id), + headers=headers, + follow_redirects=True, + ) + assert asset_response.status_code == 403 + # proper 404 for non-existing asset + asset_response = client.get( + url_for("AssetAPI:fetch_one", id=8171766575), + headers=headers, + follow_redirects=True, + ) + assert asset_response.status_code == 404 + assert "not found" in asset_response.json["message"] + + +@pytest.mark.parametrize("account_name, num_assets", [("Prosumer", 2), ("Supplier", 1)]) +def test_get_assets( + client, setup_api_test_data, setup_accounts, account_name, num_assets +): + """ + Get assets per account. + Our user here is admin, so is allowed to see all assets. 
+ """ + auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest") + + query = {"account_id": setup_accounts[account_name].id} + + get_assets_response = client.get( + url_for("AssetAPI:index"), + query_string=query, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + print("Server responded with:\n%s" % get_assets_response.json) + assert get_assets_response.status_code == 200 + assert len(get_assets_response.json) == num_assets + + if account_name == "Supplier": # one deep dive + turbine = {} + for asset in get_assets_response.json: + if asset["name"] == "Test wind turbine": + turbine = asset + assert turbine + assert turbine["account_id"] == setup_accounts["Supplier"].id + + +def test_alter_an_asset(client, setup_api_test_data, setup_accounts): + # without being an account-admin, no asset can be created ... + with UserContext("test_prosumer_user@seita.nl") as prosumer1: + auth_token = prosumer1.get_auth_token() # not an account admin + with AccountContext("Test Prosumer Account") as prosumer: + prosumer_asset = prosumer.generic_assets[0] + asset_creation_response = client.post( + url_for("AssetAPI:post"), + headers={"content-type": "application/json", "Authorization": auth_token}, + json={}, + ) + print(f"Creation Response: {asset_creation_response.json}") + assert asset_creation_response.status_code == 403 + # ... or deleted ... + asset_delete_response = client.delete( + url_for("AssetAPI:delete", id=prosumer_asset.id), + headers={"content-type": "application/json", "Authorization": auth_token}, + json={}, + ) + print(f"Deletion Response: {asset_delete_response.json}") + assert asset_delete_response.status_code == 403 + # ... but editing is allowed. + asset_edit_response = client.patch( + url_for("AssetAPI:patch", id=prosumer_asset.id), + headers={"content-type": "application/json", "Authorization": auth_token}, + json={ + "latitude": prosumer_asset.latitude + }, # we're not changing values to keep other tests clean here + ) + print(f"Editing Response: {asset_edit_response.json}") + assert asset_edit_response.status_code == 200 + + +def test_post_an_asset_with_existing_name(client, setup_api_test_data): + """Catch DB error (Unique key violated) correctly""" + with UserContext("test_admin_user@seita.nl") as admin_user: + auth_token = admin_user.get_auth_token() + with AccountContext("Test Prosumer Account") as prosumer: + prosumer_id = prosumer.id + existing_asset = prosumer.generic_assets[0] + post_data = get_asset_post_data() + post_data["name"] = existing_asset.name + post_data["account_id"] = prosumer_id + asset_creation_response = client.post( + url_for("AssetAPI:post"), + json=post_data, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + print(f"Creation Response: {asset_creation_response.json}") + assert asset_creation_response.status_code == 422 + assert ( + "already exists" in asset_creation_response.json["message"]["json"]["name"][0] + ) + + +def test_post_an_asset_with_nonexisting_field(client, setup_api_test_data): + """Posting a field that is unexpected leads to a 422""" + with UserContext("test_admin_user@seita.nl") as prosumer: + auth_token = prosumer.get_auth_token() + post_data = get_asset_post_data() + post_data["nnname"] = "This field does not exist" + asset_creation = client.post( + url_for("AssetAPI:post"), + json=post_data, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + assert asset_creation.status_code == 422 + assert 
asset_creation.json["message"]["json"]["nnname"][0] == "Unknown field." + + +def test_posting_multiple_assets(client, setup_api_test_data): + """We can only send one at a time""" + with UserContext("test_admin_user@seita.nl") as prosumer: + auth_token = prosumer.get_auth_token() + post_data1 = get_asset_post_data() + post_data2 = get_asset_post_data() + post_data2["name"] = "Test battery 3" + asset_creation = client.post( + url_for("AssetAPI:post"), + json=[post_data1, post_data2], + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + print(f"Response: {asset_creation.json}") + assert asset_creation.status_code == 422 + assert asset_creation.json["message"]["json"]["_schema"][0] == "Invalid input type." + + +def test_post_an_asset_with_invalid_data(client, setup_api_test_data): + """ + Add an asset with some fields having invalid data and one field missing. + The right error messages should be in the response and the number of assets has not increased. + """ + with UserContext("test_admin_user@seita.nl") as prosumer: + num_assets_before = len(prosumer.assets) + + auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest") + + post_data = get_asset_post_data() + post_data["name"] = "Something new" + post_data["longitude"] = 300.9 + del post_data["generic_asset_type_id"] + + post_asset_response = client.post( + url_for("AssetAPI:post"), + json=post_data, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + print("Server responded with:\n%s" % post_asset_response.json) + assert post_asset_response.status_code == 422 + + assert ( + "exceeds the maximum longitude" + in post_asset_response.json["message"]["json"]["longitude"][0] + ) + assert ( + "required field" + in post_asset_response.json["message"]["json"]["generic_asset_type_id"][0] + ) + + assert ( + GenericAsset.query.filter_by(account_id=prosumer.id).count() + == num_assets_before + ) diff --git a/flexmeasures/api/dev/tests/test_assets_api_fresh_db.py b/flexmeasures/api/dev/tests/test_assets_api_fresh_db.py new file mode 100644 index 000000000..119cb5ad0 --- /dev/null +++ b/flexmeasures/api/dev/tests/test_assets_api_fresh_db.py @@ -0,0 +1,65 @@ +from flask import url_for +import pytest + +from flexmeasures.api.tests.utils import get_auth_token, AccountContext +from flexmeasures.data.models.generic_assets import GenericAsset +from flexmeasures.api.dev.tests.utils import get_asset_post_data + + +@pytest.mark.parametrize("admin_kind", ["site-admin", "account-admin"]) +def test_post_an_asset_as_admin(client, setup_api_fresh_test_data, admin_kind): + """ + Post one extra asset, as an admin user. 
+ """ + post_data = get_asset_post_data() + if admin_kind == "site-admin": + auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest") + else: + auth_token = get_auth_token(client, "test_prosumer_user_2@seita.nl", "testtest") + post_data["name"] = "Test battery 3" + post_assets_response = client.post( + url_for("AssetAPI:post"), + json=post_data, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + print("Server responded with:\n%s" % post_assets_response.json) + assert post_assets_response.status_code == 201 + assert post_assets_response.json["latitude"] == 30.1 + + asset: GenericAsset = GenericAsset.query.filter( + GenericAsset.name == post_data["name"] + ).one_or_none() + assert asset is not None + assert asset.latitude == 30.1 + + +def test_edit_an_asset(client, setup_api_fresh_test_data): + with AccountContext("Test Prosumer Account") as prosumer: + existing_asset = prosumer.generic_assets[1] + + post_data = dict(latitude=10, id=999) # id will be ignored + auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest") + edit_asset_response = client.patch( + url_for("AssetAPI:patch", id=existing_asset.id), + json=post_data, + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + assert edit_asset_response.status_code == 200 + updated_asset = GenericAsset.query.filter_by(id=existing_asset.id).one_or_none() + assert updated_asset.latitude == 10 # changed value + assert updated_asset.longitude == existing_asset.longitude + assert updated_asset.name == existing_asset.name + + +def test_delete_an_asset(client, setup_api_fresh_test_data): + with AccountContext("Test Prosumer Account") as prosumer: + existing_asset_id = prosumer.generic_assets[0].id + + auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest") + delete_asset_response = client.delete( + url_for("AssetAPI:delete", id=existing_asset_id), + headers={"content-type": "application/json", "Authorization": auth_token}, + ) + assert delete_asset_response.status_code == 204 + deleted_asset = GenericAsset.query.filter_by(id=existing_asset_id).one_or_none() + assert deleted_asset is None diff --git a/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py b/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py index 7b78fa493..ec3df96b1 100644 --- a/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py +++ b/flexmeasures/api/dev/tests/test_sensor_data_fresh_db.py @@ -1,5 +1,4 @@ import pytest - from flask import url_for from flexmeasures.api.tests.utils import get_auth_token diff --git a/flexmeasures/api/dev/tests/utils.py b/flexmeasures/api/dev/tests/utils.py index 8d655dc6a..920362a65 100644 --- a/flexmeasures/api/dev/tests/utils.py +++ b/flexmeasures/api/dev/tests/utils.py @@ -20,3 +20,14 @@ def make_sensor_data_request_for_gas_sensor( # flatten [] to message["values"] = message["values"][0] return message + + +def get_asset_post_data(account_id: int = 1, asset_type_id: int = 1) -> dict: + post_data = { + "name": "Test battery 2", + "latitude": 30.1, + "longitude": 100.42, + "generic_asset_type_id": asset_type_id, + "account_id": account_id, + } + return post_data diff --git a/flexmeasures/api/tests/utils.py b/flexmeasures/api/tests/utils.py index 5633f87ec..603fa8f21 100644 --- a/flexmeasures/api/tests/utils.py +++ b/flexmeasures/api/tests/utils.py @@ -4,6 +4,8 @@ from flexmeasures.data.config import db from flexmeasures.data.services.users import find_user_by_email +from flexmeasures.data.models.user import Account + """ 
Useful things for API testing @@ -27,6 +29,31 @@ def get_auth_token(client, user_email, password): return auth_response.json["auth_token"] +class AccountContext(object): + """ + Context manager for a temporary account instance from the DB, + which is expunged from the session at Exit. + Expunging is useful, so that the API call being tested still operates on + a "fresh" session. + While the context is alive, you can collect any useful information, like + the account's assets: + + with AccountContext("Supplier") as supplier: + assets = supplier.generic_assets + """ + + def __init__(self, account_name: str): + self.the_account = Account.query.filter( + Account.name == account_name + ).one_or_none() + + def __enter__(self): + return self.the_account + + def __exit__(self, type, value, traceback): + db.session.expunge(self.the_account) + + class UserContext(object): """ Context manager for a temporary user instance from the DB, @@ -37,7 +64,7 @@ class UserContext(object): the user's assets: with UserContext("test_prosumer_user@seita.nl") as prosumer: - assets = prosumer.assets + user_roles = prosumer.roles """ def __init__(self, user_email: str): diff --git a/flexmeasures/api/v1/tests/conftest.py b/flexmeasures/api/v1/tests/conftest.py index 86880b58f..212ef9406 100644 --- a/flexmeasures/api/v1/tests/conftest.py +++ b/flexmeasures/api/v1/tests/conftest.py @@ -4,8 +4,6 @@ import isodate import pytest -from flask_security.utils import hash_password - from flexmeasures.data.services.users import create_user from flexmeasures.data.models.time_series import TimedBelief @@ -24,7 +22,7 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices test_anonymous_user = create_user( username="anonymous user", email="demo@seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts["Dummy"].name, user_roles=[ dict(name="anonymous", description="Anonymous test user"), @@ -50,16 +48,6 @@ def setup_api_test_data(db, setup_accounts, setup_roles_users, add_market_prices assets.append(asset) db.session.add(asset) - """ - # Create a test user without a USEF role - create_user( - username="test user without roles", - email="test_prosumer_user@seita.nl", - password=hash_password("testtest"), - account_name=setup_accounts["Prosumer"].name, - ) - """ - # Create 5 test assets for the test user test_user = setup_roles_users["Test Prosumer User"] asset_names = ["CS 1", "CS 2", "CS 3", "CS 4", "CS 5"] diff --git a/flexmeasures/api/v1_1/routes.py b/flexmeasures/api/v1_1/routes.py index 4922ad871..eec003f20 100644 --- a/flexmeasures/api/v1_1/routes.py +++ b/flexmeasures/api/v1_1/routes.py @@ -83,10 +83,10 @@ def get_connection(): { "type": "GetConnectionResponse", "connections": [ - "ea1.2018-06.io.flexmeasures.company:3:4", - "ea1.2018-06.io.flexmeasures.company:8:3", - "ea1.2018-06.io.flexmeasures.company:9:2", - "ea1.2018-06.io.flexmeasures.company:3:1" + "ea1.2021-01.io.flexmeasures.company:3:4", + "ea1.2021-01.io.flexmeasures.company:8:3", + "ea1.2021-01.io.flexmeasures.company:9:2", + "ea1.2021-01.io.flexmeasures.company:3:1" ], "names": [ "CS 4", @@ -132,7 +132,7 @@ def post_price_data(): { "type": "PostPriceDataRequest", - "market": "ea1.2018-06.localhost:epex_da", + "market": "ea1.2021-01.io.flexmeasures.company:epex_da", "values": [ 52.37, 51.14, @@ -220,7 +220,7 @@ def post_weather_data(): "type": "PostWeatherDataRequest", "groups": [ { - "sensor": "ea1.2018-06.localhost:temperature:33.4843866:126.477859", + "sensor": 
"ea1.2021-01.io.flexmeasures.company:temperature:33.4843866:126.477859", "values": [ 20.04, 20.23, diff --git a/flexmeasures/api/v1_2/routes.py b/flexmeasures/api/v1_2/routes.py index 0bde6343d..0693905c3 100644 --- a/flexmeasures/api/v1_2/routes.py +++ b/flexmeasures/api/v1_2/routes.py @@ -57,7 +57,7 @@ def get_device_message(): { "type": "GetDeviceMessageRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc" + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:203:soc" } **Example response** @@ -69,7 +69,7 @@ def get_device_message(): { "type": "GetDeviceMessageResponse", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:203:soc", "values": [ 2.15, 3, @@ -112,7 +112,7 @@ def post_udi_event(): { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:203:soc", "value": 12.1, "unit": "kWh", "datetime": "2015-06-02T10:00:00+00:00", diff --git a/flexmeasures/api/v1_3/routes.py b/flexmeasures/api/v1_3/routes.py index 4154c7693..a7a3c56de 100644 --- a/flexmeasures/api/v1_3/routes.py +++ b/flexmeasures/api/v1_3/routes.py @@ -39,7 +39,7 @@ def get_device_message(): { "type": "GetDeviceMessageRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc" + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:203:soc" } **Example response** @@ -51,7 +51,7 @@ def get_device_message(): { "type": "GetDeviceMessageResponse", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:203:soc", "values": [ 2.15, 3, @@ -93,7 +93,7 @@ def post_udi_event(): { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:203:soc", + "event": "ea1.2021-01.io.flexmeasures.company:7:10:203:soc", "value": 12.1, "unit": "kWh", "datetime": "2015-06-02T10:00:00+00:00" @@ -110,7 +110,7 @@ def post_udi_event(): { "type": "PostUdiEventRequest", - "event": "ea1.2018-06.io.flexmeasures.company:7:10:204:soc-with-targets", + "event": "ea1.2021-01.io.flexmeasures.company:fm0.7:10:204:soc-with-targets", "value": 12.1, "unit": "kWh", "datetime": "2015-06-02T10:00:00+00:00", diff --git a/flexmeasures/api/v2_0/implementations/assets.py b/flexmeasures/api/v2_0/implementations/assets.py index 97a65d19e..0438aa633 100644 --- a/flexmeasures/api/v2_0/implementations/assets.py +++ b/flexmeasures/api/v2_0/implementations/assets.py @@ -17,6 +17,11 @@ from flexmeasures.api.common.responses import required_info_missing +""" +Deprecated. Use /api/dev/generic_assets. +TODO: Can/should we add a deprecation warning to responses? 
+""" + asset_schema = AssetSchema() assets_schema = AssetSchema(many=True) diff --git a/flexmeasures/api/v2_0/implementations/users.py b/flexmeasures/api/v2_0/implementations/users.py index 7ed5c1964..aba0ba7aa 100644 --- a/flexmeasures/api/v2_0/implementations/users.py +++ b/flexmeasures/api/v2_0/implementations/users.py @@ -29,7 +29,9 @@ @use_kwargs( { - "account": AccountIdField(data_key="account_id"), + "account": AccountIdField( + data_key="account_id", load_default=AccountIdField.load_current + ), "include_inactive": fields.Bool(load_default=False), }, location="query", diff --git a/flexmeasures/api/v2_0/routes.py b/flexmeasures/api/v2_0/routes.py index 65139c3a9..b346591bf 100644 --- a/flexmeasures/api/v2_0/routes.py +++ b/flexmeasures/api/v2_0/routes.py @@ -104,6 +104,8 @@ def get_assets(): If no owner is set, all accessible assets are returned. A non-admin user can only access their own assets. + .. warning:: This API will be replaced by the new-style asset and sensor APIs. The former is already working at at ``/api/dev/generic_assets``. See also :ref:`note_on_datamodel_transition`. + **Example response** An example of one asset being returned: @@ -157,6 +159,8 @@ def post_assets(): This endpoint creates a new asset. Only users with the admin role are allowed to create assets. + .. warning:: This API will be replaced by the new-style asset and sensor APIs. The former is already working at at ``/api/dev/generic_assets``. See also :ref:`note_on_datamodel_transition`. + **Example request** The following example contains the required fields only, plus the two state of charge (soc) fields @@ -228,6 +232,8 @@ def get_asset(id: int): This endpoint gets an asset. Only users who own the asset can use this endpoint. + .. warning:: This API will be replaced by the new-style asset and sensor APIs. The former is already working at at ``/api/dev/generic_assets``. See also :ref:`note_on_datamodel_transition`. + **Example response** .. sourcecode:: json @@ -274,6 +280,8 @@ def patch_asset(id: int): Any subset of asset fields can be sent. Only users who own the asset are allowed to update its data. + .. warning:: This API will be replaced by the new-style asset and sensor APIs. The former is already working at at ``/api/dev/generic_assets``. See also :ref:`note_on_datamodel_transition`. + Several fields are not allowed to be updated, e.g. id. They are ignored. **Example request** @@ -336,6 +344,8 @@ def delete_asset(id: int): This endpoint deletes an existing asset, as well as all measurements recorded for it. Only users who own the asset are allowed to delete the asset. + .. warning:: This API will be replaced by the new-style asset and sensor APIs. The former is already working at at ``/api/dev/generic_assets``. See also :ref:`note_on_datamodel_transition`. + :reqheader Authorization: The authentication token :reqheader Content-Type: application/json :resheader Content-Type: application/json @@ -515,7 +525,7 @@ def get_connection(): def post_price_data(): """API endpoint to post price data. - .. :quickref: User; Upload price data to the platform + .. :quickref: Data; Upload price data to the platform **Optional fields** @@ -525,14 +535,14 @@ def post_price_data(): **Example request** This "PostPriceDataRequest" message posts prices for hourly intervals between midnight and midnight the next day - for the EPEX SPOT day-ahead auction. + for the EPEX SPOT day-ahead auction, registered as sensor 12. The prior indicates that the prices were published at 1pm on December 31st 2020. .. 
code-block:: json { "type": "PostPriceDataRequest", - "market": "ea1.2018-06.localhost:epex_da", + "market": "ea1.2021-01.io.flexmeasures.company:fm1.12", "values": [ 52.37, 51.14, @@ -601,9 +611,7 @@ def post_weather_data(): - "temperature" (with °C as unit) - "wind_speed" (with m/s as unit) - The sensor type is part of the unique entity address for each sensor, together with the sensor's latitude and longitude. - - .. :quickref: User; Upload weather data to the platform + .. :quickref: Data; Upload weather data to the platform **Optional fields** @@ -613,7 +621,7 @@ def post_weather_data(): **Example request** This "PostWeatherDataRequest" message posts temperature forecasts for 15-minute intervals between 3.00pm and 4.30pm - for a weather sensor located at latitude 33.4843866 and longitude 126.477859. The forecasts were made at noon. + for a weather sensor with id 602. The forecasts were made at noon. .. code-block:: json @@ -621,7 +629,7 @@ def post_weather_data(): "type": "PostWeatherDataRequest", "groups": [ { - "sensor": "ea1.2018-06.localhost:temperature:33.4843866:126.477859", + "sensor": "ea1.2021-01.io.flexmeasures.company:fm1.602", "values": [ 20.04, 20.23, @@ -690,7 +698,7 @@ def get_meter_data(): def post_meter_data(): """API endpoint to post meter data. - .. :quickref: User; Upload meter data to the platform + .. :quickref: Data; Upload meter data to the platform **Optional fields** @@ -700,7 +708,7 @@ def post_meter_data(): **Example request** This "PostMeterDataRequest" message posts measured consumption for 15-minute intervals between 0.00am and 1.30am for - charging stations 1, 2 and 3 (negative values denote production). + connections 3, 4 and 5 (negative values denote production). .. code-block:: json @@ -709,8 +717,8 @@ def post_meter_data(): "groups": [ { "connections": [ - "CS 1", - "CS 3" + "ea1.2021-01.io.flexmeasures.company:fm1.3", + "ea1.2021-01.io.flexmeasures.company:fm1.4" ], "values": [ 306.66, @@ -723,7 +731,7 @@ def post_meter_data(): }, { "connections": [ - "CS 2" + "ea1.2021-01.io.flexmeasures.company:fm1.5" ], "values": [ 306.66, @@ -773,7 +781,7 @@ def post_meter_data(): def post_prognosis(): """API endpoint to post prognoses about meter data. - .. :quickref: User; Upload prognosis to the platform + .. :quickref: Data; Upload prognosis to the platform **Optional fields** @@ -783,7 +791,7 @@ def post_prognosis(): **Example request** This "PostPrognosisRequest" message posts prognosed consumption for 15-minute intervals between 0.00am and 1.30am for - charging stations 1, 2 and 3 (negative values denote production), prognosed at 6pm the previous day. + connections 3, 4 and 5 (negative values denote production), prognosed at 6pm the previous day. .. 
code-block:: json @@ -792,8 +800,8 @@ def post_prognosis(): "groups": [ { "connections": [ - "ea1.2018-06.localhost:1:3", - "ea1.2018-06.localhost:1:4" + "ea1.2021-01.io.flexmeasures.company:fm1.3", + "ea1.2021-01.io.flexmeasures.company:fm1.4" ], "values": [ 300, @@ -806,7 +814,7 @@ def post_prognosis(): }, { "connections": [ - "ea1.2018-06.localhost:1:5" + "ea1.2021-01.io.flexmeasures.company:fm1.5" ], "values": [ 300, diff --git a/flexmeasures/auth/decorators.py b/flexmeasures/auth/decorators.py index 96aa2de79..d783dbd6b 100644 --- a/flexmeasures/auth/decorators.py +++ b/flexmeasures/auth/decorators.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Callable, Optional from functools import wraps from flask import current_app from flask_json import as_json @@ -104,12 +104,22 @@ def decorated_view(*args, **kwargs): def permission_required_for_context( - permission: str, arg_pos: Optional[int] = None, arg_name: Optional[str] = None + permission: str, + arg_pos: Optional[int] = None, + arg_name: Optional[str] = None, + arg_loader: Optional[Callable] = None, ): """ This decorator can be used to make sure that the current user has the necessary permission to access the context. - The context needs to be an AuthModelMixin and is found in the keyword arguments by name and/or by a position in the non-keyword arguments (defaults to 0). - Using both arguments is useful when Marshmallow places a dict of de-serialized fields and you are using use_args. + The context needs to be an AuthModelMixin and is found ... + - by loading it via the arg_loader callable; + - otherwise: + * by the keyword argument arg_name; + * and/or by a position in the non-keyword arguments (arg_pos). + If nothing is passed, the context lookup defaults to arg_pos=0. + + Using both arg_name and arg_pos arguments is useful when Marshmallow de-serializes to a dict and you are using use_args. In this case, the context lookup applies first arg_pos, then arg_name. + The permission needs to be a known permission and is checked with principal descriptions from the context's access control list (see AuthModelMixin.__acl__). Usually, you'd place a marshmallow field further up in the decorator chain, e.g.: @@ -121,10 +131,10 @@ def permission_required_for_context( ) @permission_required_for_context("read", arg_name="the_resource") @as_json - def view(resource_id: int, resource: Resource): - return dict(name=resource.name) + def view(resource_id: int, the_resource: Resource): + return dict(name=the_resource.name) - Where `ResourceIdField._deserialize()` turns the id parameter into a resource context (if possible). + Where `ResourceIdField._deserialize()` turns the id parameter into a Resource context (if possible). This decorator raises a 403 response if there is no principal for the required permission. It raises a 401 response if the user is not authenticated at all. 
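To illustrate the new ``arg_loader`` option of this decorator: a minimal usage sketch (the view and loader below are hypothetical; only the decorator and its parameters come from this patch), in which the context is loaded by a callable instead of being picked from the view's arguments:

.. code-block:: python

    from flask import request

    from flexmeasures.auth.decorators import permission_required_for_context
    from flexmeasures.data.models.time_series import Sensor

    def load_sensor() -> Sensor:
        # Hypothetical loader: fetch the context object ourselves,
        # here from the route's id parameter.
        return Sensor.query.get_or_404(request.view_args["id"])

    @permission_required_for_context("read", arg_loader=load_sensor)
    def get_sensor(id: int):
        ...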
@@ -138,8 +148,10 @@ def decorated_view(*args, **kwargs): if current_user.is_anonymous: raise Unauthorized() # load & check context - if arg_pos is not None and arg_name is not None: - context: AuthModelMixin = args[arg_pos][arg_name] + if arg_loader is not None: + context: AuthModelMixin = arg_loader() + elif arg_pos is not None and arg_name is not None: + context = args[arg_pos][arg_name] elif arg_pos is not None: context = args[arg_pos] elif arg_name is not None: @@ -157,6 +169,9 @@ def decorated_view(*args, **kwargs): # now check access, either with admin rights or principal(s) acl = context.__acl__() principals = acl.get(permission, tuple()) + current_app.logger.debug( + f"Looking for {permission}-permission on {context} ... Principals: {principals}" + ) if not user_has_admin_access( current_user, permission ) and not user_matches_principals(current_user, principals): diff --git a/flexmeasures/auth/policy.py b/flexmeasures/auth/policy.py index de6eb07a1..756e8afa0 100644 --- a/flexmeasures/auth/policy.py +++ b/flexmeasures/auth/policy.py @@ -1,9 +1,9 @@ -from typing import Dict, Union, Tuple +from typing import Dict, Union, Tuple, List from flask import current_app -PERMISSIONS = ["create", "read", "update", "delete"] +PERMISSIONS = ["create-children", "read", "read-children", "update", "delete"] ADMIN_ROLE = "admin" ADMIN_READER_ROLE = "admin-reader" @@ -11,37 +11,58 @@ # constants to allow access to certain groups EVERYONE = "everyone" +PRINCIPALS_TYPE = Union[str, Tuple[str], List[Union[str, Tuple[str]]]] + class AuthModelMixin(object): - def __acl__(self) -> Dict[str, Union[str, Tuple[str]]]: + def __acl__(self) -> Dict[str, PRINCIPALS_TYPE]: """ - Access control list for a resource instance. Inspired by Pyramid's resource ACLs. + This function returns an access control list (ACL) for an instance of a model that is relevant for authorization. + + ACLs in FlexMeasures are inspired by Pyramid's resource ACLs. + In an ACL, we list which principals (security contexts, see below) allow certain kinds of actions + ― by mapping supported permissions to the required principals. - This function returns a mapping of permissions to principal descriptors. + # What is a principal / security context? - In computer security, a principal is the security context of the authenticated user [1]. - In the access control list, we list which principal aspects allow certain kinds of actions. + In computer security, a "principal" is the security context of the authenticated user [1]. + For example, within FlexMeasures, an accepted principal is "user:2", which denotes that the user should have ID 2 + (more technical specifications follow below). - In these access control lists, we allow to codify user and account roles, as well as user and account IDs. + # Example - Here are some (fictional) examples: + Here are some examples of principals mapped to permissions in a fictional ACL: { - "create": "account:3", # Everyone in Account 3 can create + "create-children": "account:3", # Everyone in Account 3 can create child items (e.g. beliefs for a sensor) "read": EVERYONE, # Reading is available to every logged-in user - "update": "user:14", # This user can update, ... - "update": "user:15", # and also this user, ... - "update": "account-role:MDC", # also people in such accounts can update + "update": ["user:14", # This user can update, ... + "user:15", # and also this user, ... 
+ "update": "account-role:MDC", # also people in such accounts can update "delete": ("account:3", "role:CEO"), # Only CEOs of Account 3 can delete } Such a list of principals can be checked with match_principals, see below. - Notes: + # Specifications of principals + + Within FlexMeasures, a principal is handled as a string, usually defining context and identification, like so: + + :. + + Supported contexts are user and account IDs, as well as user and account roles. All of them feature in the example above. + + Iterable principal descriptors should be treated as follows: + - a list contains OR-connected items, which can be principal or tuples of principals (one of the items in the list is sufficient to grant the permission) + - a tuple contains AND-connected strings (you need all of the items in the list to grant the permission). + + # Row-level authorization - - Iterable principal descriptors should be treated as to be AND-connected. This helps to define subsets, - like the deletion example above. - - This is row-level authorization, which requires an instance. We are considering table-level authorization, which wouldn't, so it would allow for faster authorization checks if no instances are needed. + This ACL approach to authorization is usually called "row-level authorization" ― it always requires an instance, from which to get the ACL. + Unlike pyramid, we have not implemented table-level authorization, where a class also can provide an ACL. + This works because we make use of the hierarchy in our model. + The highest level (e.g. an account) is created by site-admins and usually not in the API, but CLI. For everything else, we can ask the ACL + on an instance, if we can handle it like we intend to. For creation of instances (where there is no instance to ask), it makes sense to use the instance one level up to look up the correct permission ("create-children"). E.g. to create belief data for a sensor, we can check the "create-children" - permission on the sensor. [1] https://docs.microsoft.com/en-us/windows/security/identity-protection/access-control/security-principals#a-href-idw2k3tr-princ-whatawhat-are-security-principals """ @@ -56,29 +77,32 @@ def user_has_admin_access(user, permission: str) -> bool: return False -def user_matches_principals(user, principals: Union[str, Tuple[str]]) -> bool: +def user_matches_principals(user, principals: PRINCIPALS_TYPE) -> bool: """ Tests if the user matches all passed principals. Returns False if no principals are passed. 
""" - if isinstance(principals, str): - principals = (principals,) - if EVERYONE in principals: - return True - if user is None: - return False - if all( - [ - ( - check_user_identity(user, principal) - or check_user_role(user, principal) - or check_account_membership(user, principal) - or check_account_role(user, principal) - ) - for principal in principals - ] - ): - return True + if not isinstance(principals, list): + principals = [principals] # now we handle a list of str or Tuple[str] + for matchable_principals in principals: + if isinstance(matchable_principals, str): + matchable_principals = ( + matchable_principals, + ) # now we handle only Tuple[str] + if EVERYONE in matchable_principals: + return True + if user is not None and all( + [ + ( + check_user_identity(user, principal) + or check_user_role(user, principal) + or check_account_membership(user, principal) + or check_account_role(user, principal) + ) + for principal in matchable_principals + ] + ): + return True return False diff --git a/flexmeasures/auth/tests/test_principal_matching.py b/flexmeasures/auth/tests/test_principal_matching.py index 0666ab369..193bd5b01 100644 --- a/flexmeasures/auth/tests/test_principal_matching.py +++ b/flexmeasures/auth/tests/test_principal_matching.py @@ -81,6 +81,16 @@ def make_mock_user( ), True, ), + ( + make_mock_user(19, ["waitress"], 113, ["hotel"]), + ["user:13", ("account:113", "role:waitress", "role:chef")], + False, # not user 13; well a waitress, but not also a chef of hotel 113 + ), + ( + make_mock_user(19, ["waitress"], 113, ["hotel"]), + ["user:13", ("account:113", "role:waitress"), "role:chef"], + True, # not user 13; well a waitress of hotel 113 - + ), ], ) def test_principals_match(mock_user, principals, should_match): diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 1ae5a0560..1859ddbe8 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -7,7 +7,6 @@ import pytz from flask import current_app as app from flask.cli import with_appcontext -from flask_security.utils import hash_password import click import getpass from sqlalchemy.exc import IntegrityError @@ -139,7 +138,7 @@ def new_user( created_user = create_user( username=username, email=email, - password=hash_password(pwd1), + password=pwd1, account_name=account.name, timezone=timezone, user_roles=roles, diff --git a/flexmeasures/conftest.py b/flexmeasures/conftest.py index 28bbbf0e3..656a75426 100644 --- a/flexmeasures/conftest.py +++ b/flexmeasures/conftest.py @@ -10,7 +10,6 @@ from flask import request, jsonify from flask_sqlalchemy import SQLAlchemy from flask_security import roles_accepted -from flask_security.utils import hash_password from werkzeug.exceptions import ( InternalServerError, BadRequest, @@ -166,7 +165,7 @@ def create_roles_users(db, test_accounts) -> Dict[str, User]: username="Test Prosumer User", email="test_prosumer_user@seita.nl", account_name=test_accounts["Prosumer"].name, - password=hash_password("testtest"), + password="testtest", # TODO: test some normal user roles later in our auth progress # user_roles=dict(name="", description=""), ) @@ -176,10 +175,8 @@ def create_roles_users(db, test_accounts) -> Dict[str, User]: username="Test Prosumer User 2", email="test_prosumer_user_2@seita.nl", account_name=test_accounts["Prosumer"].name, - password=hash_password("testtest"), - # TODO: Test some normal user roles later in our auth progress. 
- # This user will then differ from the user above - # user_roles=dict(name="", description=""), + password="testtest", + user_roles=dict(name="account-admin", description="Admin for this account"), ) ) # A user on an account without any special rights @@ -188,7 +185,16 @@ def create_roles_users(db, test_accounts) -> Dict[str, User]: username="Test Dummy User", email="test_dummy_user_3@seita.nl", account_name=test_accounts["Dummy"].name, - password=hash_password("testtest"), + password="testtest", + ) + ) + # A supplier user + new_users.append( + create_user( + username="Test Supplier User", + email="test_supplier_user_4@seita.nl", + account_name=test_accounts["Supplier"].name, + password="testtest", ) ) # One platform admin @@ -199,7 +205,7 @@ def create_roles_users(db, test_accounts) -> Dict[str, User]: account_name=test_accounts[ "Dummy" ].name, # the account does not give rights - password=hash_password("testtest"), + password="testtest", user_roles=dict( name=ADMIN_ROLE, description="A user who can do everything." ), @@ -260,28 +266,77 @@ def setup_asset_types_fresh_db(fresh_db) -> Dict[str, AssetType]: @pytest.fixture(scope="module") -def setup_generic_asset(db, setup_generic_asset_type) -> Dict[str, AssetType]: +def setup_generic_assets( + db, setup_generic_asset_types, setup_accounts +) -> Dict[str, GenericAsset]: """Make some generic assets used throughout.""" + return create_generic_assets(db, setup_generic_asset_types, setup_accounts) + + +@pytest.fixture(scope="function") +def setup_generic_assets_fresh_db( + fresh_db, setup_generic_asset_types_fresh_db, setup_accounts_fresh_db +) -> Dict[str, GenericAsset]: + """Make some generic assets used throughout.""" + return create_generic_assets( + fresh_db, setup_generic_asset_types_fresh_db, setup_accounts_fresh_db + ) + + +def create_generic_assets(db, setup_generic_asset_types, setup_accounts): troposphere = GenericAsset( - name="troposphere", generic_asset_type=setup_generic_asset_type["public_good"] + name="troposphere", generic_asset_type=setup_generic_asset_types["public_good"] ) db.session.add(troposphere) - return dict(troposphere=troposphere) + test_battery = GenericAsset( + name="Test battery", + generic_asset_type=setup_generic_asset_types["battery"], + account_id=setup_accounts["Prosumer"].id, + ) + db.session.add(test_battery) + test_wind_turbine = GenericAsset( + name="Test wind turbine", + generic_asset_type=setup_generic_asset_types["wind"], + account_id=setup_accounts["Supplier"].id, + ) + db.session.add(test_wind_turbine) + + return dict( + troposphere=troposphere, + test_battery=test_battery, + test_wind_turbine=test_wind_turbine, + ) @pytest.fixture(scope="module") -def setup_generic_asset_type(db) -> Dict[str, AssetType]: +def setup_generic_asset_types(db) -> Dict[str, GenericAssetType]: """Make some generic asset types used throughout.""" + return create_generic_asset_types(db) + +@pytest.fixture(scope="function") +def setup_generic_asset_types_fresh_db(fresh_db) -> Dict[str, GenericAssetType]: + """Make some generic asset types used throughout.""" + return create_generic_asset_types(fresh_db) + + +def create_generic_asset_types(db): public_good = GenericAssetType( name="public good", ) db.session.add(public_good) - return dict(public_good=public_good) + solar = GenericAssetType(name="solar") + db.session.add(solar) + wind = GenericAssetType(name="wind") + db.session.add(wind) + battery = GenericAssetType(name="battery") + db.session.add(battery) + return dict(public_good=public_good, solar=solar, wind=wind, battery=battery) def 
create_test_asset_types(db) -> Dict[str, AssetType]: - """Make some asset types used throughout.""" + """Make some asset types used throughout. + Deprecated. Remove with Asset model.""" solar = AssetType( name="solar", @@ -306,7 +361,8 @@ def create_test_asset_types(db) -> Dict[str, AssetType]: def setup_assets( db, setup_roles_users, setup_markets, setup_sources, setup_asset_types ) -> Dict[str, Asset]: - """Add assets to known test users.""" + """Add assets to known test users. + Deprecated. Remove with Asset model.""" assets = [] for asset_name in ["wind-asset-1", "wind-asset-2", "solar-asset-1"]: @@ -617,10 +673,10 @@ def create_weather_sensors(db: SQLAlchemy): @pytest.fixture(scope="module") -def add_sensors(db: SQLAlchemy, setup_generic_asset): +def add_sensors(db: SQLAlchemy, setup_generic_assets): """Add some generic sensors.""" height_sensor = Sensor( - name="height", unit="m", generic_asset=setup_generic_asset["troposphere"] + name="height", unit="m", generic_asset=setup_generic_assets["troposphere"] ) db.session.add(height_sensor) return height_sensor diff --git a/flexmeasures/data/migrations/versions/a918360f7d63_add_unique_contraints_on_.py b/flexmeasures/data/migrations/versions/a918360f7d63_add_unique_contraints_on_.py new file mode 100644 index 000000000..8f0414819 --- /dev/null +++ b/flexmeasures/data/migrations/versions/a918360f7d63_add_unique_contraints_on_.py @@ -0,0 +1,37 @@ +"""add unique constraints: on GenericAssetType.name & on GenericAssets for name+account_id + +Revision ID: a918360f7d63 +Revises: 830e72a8b218 +Create Date: 2022-01-01 22:08:50.163734 + +""" +from alembic import op + + +# revision identifiers, used by Alembic. +revision = "a918360f7d63" +down_revision = "830e72a8b218" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_unique_constraint( + "generic_asset_name_account_id_key", "generic_asset", ["name", "account_id"] + ) + op.create_unique_constraint( + op.f("generic_asset_type_name_key"), "generic_asset_type", ["name"] + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint( + op.f("generic_asset_type_name_key"), "generic_asset_type", type_="unique" + ) + op.drop_constraint( + "generic_asset_name_account_id_key", "generic_asset", type_="unique" + ) + # ### end Alembic commands ### diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index b089b1051..49415fedc 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -39,10 +39,14 @@ class AssetType(db.Model): yearly_seasonality = db.Column(db.Boolean(), nullable=False, default=False) def __init__(self, **kwargs): - generic_asset_type = GenericAssetType( - name=kwargs["name"], description=kwargs.get("hover_label", None) - ) - db.session.add(generic_asset_type) + generic_asset_type = GenericAssetType.query.filter_by( + name=kwargs["name"] + ).one_or_none() + if not generic_asset_type: + generic_asset_type = GenericAssetType( + name=kwargs["name"], description=kwargs.get("hover_label", None) + ) + db.session.add(generic_asset_type) super(AssetType, self).__init__(**kwargs) self.name = self.name.replace(" ", "_").lower() if "display_name" not in kwargs: diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index a14d2d690..ce90c900d 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -6,8 +6,8 @@ from sqlalchemy.ext.hybrid import hybrid_method from sqlalchemy.sql.expression import func - from sqlalchemy.ext.mutable import MutableDict +from sqlalchemy.schema import UniqueConstraint from flexmeasures.data import db from flexmeasures.data.models.user import User @@ -22,7 +22,7 @@ class GenericAssetType(db.Model): """ id = db.Column(db.Integer, primary_key=True) - name = db.Column(db.String(80), default="") + name = db.Column(db.String(80), default="", unique=True) description = db.Column(db.String(80), nullable=True, unique=False) @@ -48,18 +48,36 @@ class GenericAsset(db.Model, AuthModelMixin): backref=db.backref("generic_assets", lazy=True), ) + __table_args__ = ( + UniqueConstraint( + "name", + "account_id", + name="generic_asset_name_account_id_key", + ), + ) + def __acl__(self): """ Within same account, everyone can read and update. - Creation and deletion are left to site admins in CLI. - - TODO: needs an iteration + Creation and deletion are left to account admins (a role we don't use yet). + Note: creation is not relevant on a GenericAsset object (as it already exists), + but we might want to use this permission to check if data *within* the asset, + like sensors, can be created. See the discussion in auth/policy. """ return { + "create-children": (f"account:{self.account_id}", "role:account-admin"), "read": f"account:{self.account_id}", "update": f"account:{self.account_id}", + "delete": (f"account:{self.account_id}", "role:account-admin"), } + def __repr__(self): + return "<GenericAsset %s: %r (%s)>" % ( + self.id, + self.name, + self.generic_asset_type.name, + ) + @property def asset_type(self) -> GenericAssetType: """ This property prepares for dropping the "generic" prefix later""" diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index c1be68986..5513d5d0b 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -73,13 +73,19 @@ def __init__( def __acl__(self): """ Within same account, everyone can read and update. - Creation and deletion are left to site admins in CLI. - - TODO: needs an iteration + Deletion needs the account-admin role. 
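+
+        For example (illustrative, for a sensor whose asset belongs to account 3), this returns:
+
+            {
+                "create-children": ("account:3", "role:account-admin"),
+                "read": "account:3",
+                "update": "account:3",
+                "delete": ("account:3", "role:account-admin"),
+            }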
""" return { + "create-children": ( + f"account:{self.generic_asset.account_id}", + "role:account-admin", + ), "read": f"account:{self.generic_asset.account_id}", "update": f"account:{self.generic_asset.account_id}", + "delete": ( + f"account:{self.generic_asset.account_id}", + "role:account-admin", + ), } @property diff --git a/flexmeasures/data/models/user.py b/flexmeasures/data/models/user.py index 29b869a5a..f17d9ffe1 100644 --- a/flexmeasures/data/models/user.py +++ b/flexmeasures/data/models/user.py @@ -47,10 +47,15 @@ def __repr__(self): def __acl__(self): """ + Only account admins can create things in the account (e.g. users or assets). Within same account, everyone can read and update. - Creation and deletion are left to site admins in CLI. + Creation and deletion of accounts are left to site admins in CLI. """ - return {"read": f"account:{self.id}", "update": f"account:{self.id}"} + return { + "create-children": (f"account:{self.id}", "role:account-admin"), + "read": f"account:{self.id}", + "update": f"account:{self.id}", + } def has_role(self, role: Union[str, AccountRole]) -> bool: """Returns `True` if the account has the specified role. @@ -115,10 +120,17 @@ def __repr__(self): def __acl__(self): """ - Within same account, everyone can read. Only the user themselves can edit their user record. + Within same account, everyone can read. + Only the user themselves or account-admins can edit their user record. Creation and deletion are left to site admins in CLI. """ - return {"read": f"account:{self.account_id}", "update": f"user:{self.id}"} + return { + "read": f"account:{self.account_id}", + "update": [ + f"user:{self.id}", + (f"account:{self.account_id}", "role:account-admin"), + ], + } @property def is_authenticated(self) -> bool: diff --git a/flexmeasures/data/queries/generic_assets.py b/flexmeasures/data/queries/generic_assets.py index c3058e811..4f6cac052 100644 --- a/flexmeasures/data/queries/generic_assets.py +++ b/flexmeasures/data/queries/generic_assets.py @@ -1,16 +1,20 @@ -from typing import List, Union +from typing import List, Union, Optional from sqlalchemy.orm import Query from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType -def query_assets_by_type(type_names: Union[List[str], str]) -> Query: +def query_assets_by_type( + type_names: Union[List[str], str], query: Optional[Query] = None +) -> Query: """ Return a query which looks for GenericAssets by their type. Pass in a list of type names or only one type name. """ - query = GenericAsset.query.join(GenericAssetType).filter( + if not query: + query = GenericAsset.query + query = query.join(GenericAssetType).filter( GenericAsset.generic_asset_type_id == GenericAssetType.id ) if isinstance(type_names, str): diff --git a/flexmeasures/data/schemas/assets.py b/flexmeasures/data/schemas/assets.py index aed32ade6..db4bd6e31 100644 --- a/flexmeasures/data/schemas/assets.py +++ b/flexmeasures/data/schemas/assets.py @@ -10,6 +10,8 @@ class AssetSchema(SensorSchemaMixin, ma.SQLAlchemySchema): """ Asset schema, with validations. + + TODO: deprecate, as it is based on legacy data model. Move some attributes to SensorSchema. 
""" class Meta: diff --git a/flexmeasures/data/schemas/generic_assets.py b/flexmeasures/data/schemas/generic_assets.py index 290d02b03..189d0d353 100644 --- a/flexmeasures/data/schemas/generic_assets.py +++ b/flexmeasures/data/schemas/generic_assets.py @@ -1,6 +1,6 @@ from typing import Optional -from marshmallow import validates, ValidationError, fields +from marshmallow import validates, validates_schema, ValidationError, fields from flexmeasures.data import ma from flexmeasures.data.models.user import Account @@ -13,15 +13,28 @@ class GenericAssetSchema(ma.SQLAlchemySchema): """ id = ma.auto_field() - name = fields.Str() + name = fields.Str(required=True) account_id = ma.auto_field() latitude = ma.auto_field() longitude = ma.auto_field() - generic_asset_type_id = fields.Integer() + generic_asset_type_id = fields.Integer(required=True) class Meta: model = GenericAsset + @validates_schema(skip_on_field_errors=False) + def validate_name_is_unique_in_account(self, data, **kwargs): + if "name" in data and "account_id" in data: + asset = GenericAsset.query.filter( + GenericAsset.name == data["name"] + and GenericAsset.account_id == data["account_id"] + ).one_or_none() + if asset: + raise ValidationError( + f"An asset with the name {data['name']} already exists in this account.", + "name", + ) + @validates("generic_asset_type_id") def validate_generic_asset_type(self, generic_asset_type_id: int): generic_asset_type = GenericAssetType.query.get(generic_asset_type_id) diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index 28b9459cd..3af10f4fd 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -65,7 +65,7 @@ def get_sensors( owner_id: Optional[int] = None, order_by_asset_attribute: str = "id", order_direction: str = "desc", -) -> List[Asset]: +) -> List[Sensor]: """Return a list of all Sensor objects owned by current_user's organisation account (or all users or a specific user - for this, admins can set an owner_id). """ diff --git a/flexmeasures/data/services/users.py b/flexmeasures/data/services/users.py index f3e985d40..a75bcf14d 100644 --- a/flexmeasures/data/services/users.py +++ b/flexmeasures/data/services/users.py @@ -11,6 +11,7 @@ EmailNotValidError, EmailUndeliverableError, ) +from flask_security.utils import hash_password from werkzeug.exceptions import NotFound from flexmeasures.data.config import db @@ -74,13 +75,18 @@ def find_user_by_email(user_email: str, keep_in_session: bool = True) -> User: def create_user( # noqa: C901 + password: str = None, user_roles: Union[Dict[str, str], List[Dict[str, str]], str, List[str]] = None, check_email_deliverability: bool = True, account_name: Optional[str] = None, **kwargs, ) -> User: """ - Convenience wrapper to create a new User object, together with + Convenience wrapper to create a new User object. + + It hashes the password. 
+ + In addition to the user, this function can create - new Role objects (if user roles do not already exist) - an Account object (if it does not exist yet) - a new DataSource object that corresponds to the user @@ -89,6 +95,8 @@ def create_user( # noqa: C901 """ # Check necessary input explicitly before anything happens + if password is None or password == "": + raise InvalidFlexMeasuresUser("No password provided.") if "email" not in kwargs: raise InvalidFlexMeasuresUser("No email address provided.") email = kwargs.pop("email").strip() @@ -138,12 +146,9 @@ def create_user( # noqa: C901 db.session.flush() user_datastore = SQLAlchemySessionUserDatastore(db.session, User, Role) - kwargs.update(email=email, username=username) + kwargs.update(password=hash_password(password), email=email, username=username) user = user_datastore.create_user(**kwargs) - if user.password is None: - set_random_password(user) - user.account_id = account.id # add roles to user (creating new roles if necessary) diff --git a/flexmeasures/data/tests/test_user_services.py b/flexmeasures/data/tests/test_user_services.py index a9f856c84..8cc17437c 100644 --- a/flexmeasures/data/tests/test_user_services.py +++ b/flexmeasures/data/tests/test_user_services.py @@ -1,7 +1,5 @@ import pytest -from flask_security.utils import hash_password - from flexmeasures.data.models.user import User, Role from flexmeasures.data.services.users import ( create_user, @@ -21,7 +19,7 @@ def test_create_user( num_users = User.query.count() user = create_user( email="new_user@seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts_fresh_db["Prosumer"].name, user_roles=["SomeRole"], ) @@ -39,12 +37,12 @@ def test_create_invalid_user( ): """A few invalid attempts to create a user""" with pytest.raises(InvalidFlexMeasuresUser) as exc_info: - create_user(password=hash_password("testtest")) + create_user(password="testtest") assert "No email" in str(exc_info.value) with pytest.raises(InvalidFlexMeasuresUser) as exc_info: create_user( email="test_user_AT_seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts_fresh_db["Prosumer"].name, ) assert "not a valid" in str(exc_info.value) @@ -52,7 +50,7 @@ def test_create_invalid_user( with pytest.raises(InvalidFlexMeasuresUser) as exc_info: create_user( email="test_prosumer@sdkkhflzsxlgjxhglkzxjhfglkxhzlzxcvlzxvb.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_account_fresh_db.name, ) assert "not seem to be deliverable" in str(exc_info.value) @@ -60,7 +58,7 @@ def test_create_invalid_user( with pytest.raises(InvalidFlexMeasuresUser) as exc_info: create_user( email="test_prosumer_user@seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts_fresh_db["Prosumer"].name, ) assert "already exists" in str(exc_info.value) @@ -68,7 +66,7 @@ def test_create_invalid_user( create_user( email="new_user@seita.nl", username="Test Prosumer User", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts_fresh_db["Prosumer"].name, ) assert "already exists" in str(exc_info.value) @@ -76,7 +74,7 @@ def test_create_invalid_user( create_user( email="new_user@seita.nl", username="New Test Prosumer User", - password=hash_password("testtest"), + password="testtest", ) assert "without knowing the name of the account" in str(exc_info.value) diff --git a/flexmeasures/ui/__init__.py b/flexmeasures/ui/__init__.py index d52aaab5c..81c43c695 
100644 --- a/flexmeasures/ui/__init__.py +++ b/flexmeasures/ui/__init__.py @@ -118,7 +118,7 @@ def basic_admin_auth(): def add_jinja_filters(app): - from flexmeasures.ui.utils.view_utils import asset_icon_name, username + from flexmeasures.ui.utils.view_utils import asset_icon_name, username, accountname app.jinja_env.filters["zip"] = zip # Allow zip function in templates app.jinja_env.add_extension( @@ -138,6 +138,7 @@ def add_jinja_filters(app): ) app.jinja_env.filters["asset_icon"] = asset_icon_name app.jinja_env.filters["username"] = username + app.jinja_env.filters["accountname"] = accountname app.jinja_env.filters[ "parse_config_entry_by_account_roles" ] = parse_config_entry_by_account_roles diff --git a/flexmeasures/ui/charts/latest_state.py b/flexmeasures/ui/charts/latest_state.py index 1d34d5f83..4fd70f039 100644 --- a/flexmeasures/ui/charts/latest_state.py +++ b/flexmeasures/ui/charts/latest_state.py @@ -54,7 +54,7 @@ def get_latest_power_as_plot(sensor: Sensor, small: bool = False) -> Tuple[str, latest_power_value *= -1 else: consumption = False - capacity_in_mw = sensor.get_attribute("capacity_in_mw") + capacity_in_mw = sensor.get_attribute("capacity_in_mw", latest_power_value) data = { latest_measurement_time_str if not small else "": [0], "Capacity in use": [latest_power_value], diff --git a/flexmeasures/ui/crud/assets.py b/flexmeasures/ui/crud/assets.py index d63566c58..683cda0fb 100644 --- a/flexmeasures/ui/crud/assets.py +++ b/flexmeasures/ui/crud/assets.py @@ -1,55 +1,39 @@ from typing import Union, Optional, Tuple -from datetime import timedelta import copy from flask import url_for, current_app from flask_classful import FlaskView from flask_wtf import FlaskForm from flask_security import login_required, current_user -from wtforms import StringField, DecimalField, IntegerField, SelectField +from wtforms import StringField, DecimalField, SelectField from wtforms.validators import DataRequired +from flexmeasures.auth.policy import ADMIN_ROLE from flexmeasures.data.config import db from flexmeasures.auth.error_handling import unauthorized_handler -from flexmeasures.data.services.users import get_users -from flexmeasures.data.services.resources import get_markets, get_center_location -from flexmeasures.data.models.assets import AssetType, Asset -from flexmeasures.data.models.user import User -from flexmeasures.data.models.markets import Market -from flexmeasures.utils.flexmeasures_inflection import parameterize -from flexmeasures.ui.utils.plotting_utils import get_latest_power_as_plot +from flexmeasures.data.services.resources import get_center_location +from flexmeasures.data.models.generic_assets import GenericAssetType, GenericAsset +from flexmeasures.data.models.user import Account +from flexmeasures.data.models.time_series import Sensor +from flexmeasures.ui.charts.latest_state import get_latest_power_as_plot from flexmeasures.ui.utils.view_utils import render_flexmeasures_template from flexmeasures.ui.crud.api_wrapper import InternalApi +from flexmeasures.utils.unit_utils import is_power_unit """ Asset crud view. -Note: This uses the internal API 2.0 ― if these endpoints get updated in a later version, - we should change the version here. +Note: This uses the internal dev API version + ― if those endpoints get moved or updated to a higher version, + we probably should change the version used here, as well. 
""" class AssetForm(FlaskForm): - """The default asset form only allows to edit the name, numbers and market.""" - - display_name = StringField("Display name") - capacity_in_mw = DecimalField("Capacity in MW", places=2) - unit = SelectField("Unit", default="MW", choices=[("MW", "MW")]) - event_resolution = IntegerField( - "Resolution in minutes (e.g. 15)", - default=15, - ) - min_soc_in_mwh = DecimalField( - "Minimum state of charge (SOC) in MWh", - places=2, - default=0, - ) - max_soc_in_mwh = DecimalField( - "Maximum state of charge (SOC) in MWh", - places=2, - default=0, - ) + """The default asset form only allows to edit the name and location.""" + + name = StringField("Name") latitude = DecimalField( "Latitude", places=4, @@ -60,25 +44,22 @@ class AssetForm(FlaskForm): places=4, render_kw={"placeholder": "--Click the map or enter a longitude--"}, ) - market_id = SelectField("Market", coerce=int) def validate_on_submit(self): - if self.market_id.data == -1: - self.market_id.data = ( + if ( + hasattr(self, "generic_asset_type_id") + and self.generic_asset_type_id.data == -1 + ): + self.generic_asset_type_id.data = ( "" # cannot be coerced to int so will be flagged as invalid input ) + if hasattr(self, "account_id") and self.account_id.data == -1: + del self.account_id # asset will be public return super().validate_on_submit() - def to_json(self, for_posting=False) -> dict: + def to_json(self) -> dict: """ turn form data into a JSON we can POST to our internal API """ data = copy.copy(self.data) - if for_posting: - data["name"] = parameterize( - data["display_name"] - ) # best guess at un-humanizing - data["capacity_in_mw"] = float(data["capacity_in_mw"]) - data["min_soc_in_mwh"] = float(data["min_soc_in_mwh"]) - data["max_soc_in_mwh"] = float(data["max_soc_in_mwh"]) data["longitude"] = float(data["longitude"]) data["latitude"] = float(data["latitude"]) @@ -100,55 +81,65 @@ def process_api_validation_errors(self, api_response: dict): class NewAssetForm(AssetForm): - """Here, in addition, we allow to set asset type and owner.""" + """Here, in addition, we allow to set asset type and account.""" - asset_type_name = SelectField("Asset type", validators=[DataRequired()]) - owner_id = SelectField("Owner", coerce=int) + generic_asset_type_id = SelectField( + "Asset type", coerce=int, validators=[DataRequired()] + ) + account_id = SelectField("Account", coerce=int) def with_options( form: Union[AssetForm, NewAssetForm] ) -> Union[AssetForm, NewAssetForm]: - if "asset_type_name" in form: - form.asset_type_name.choices = [("none chosen", "--Select type--")] + [ - (atype.name, atype.display_name) for atype in AssetType.query.all() - ] - if "owner_id" in form: - form.owner_id.choices = [(-1, "--Select existing--")] + [ - (o.id, o.username) for o in get_users(role_name="Prosumer") + if "generic_asset_type_id" in form: + form.generic_asset_type_id.choices = [(-1, "--Select type--")] + [ + (atype.id, atype.name) for atype in GenericAssetType.query.all() ] - if "market_id" in form: - form.market_id.choices = [(-1, "--Select existing--")] + [ - (m.id, m.display_name) for m in get_markets() + if "account_id" in form: + form.account_id.choices = [(-1, "--Select account--")] + [ + (account.id, account.name) for account in Account.query.all() ] return form def process_internal_api_response( asset_data: dict, asset_id: Optional[int] = None, make_obj=False -) -> Union[Asset, dict]: +) -> Union[GenericAsset, dict]: """ Turn data from the internal API into something we can use to further populate the UI. 
Either as an asset object or a dict for form filling. """ + + def expunge_asset(): + # use this if we don't want a query's autoflush to insert this asset + if asset in db.session: + db.session.expunge(asset) + asset_data.pop("status", None) # might have come from requests.response if asset_id: asset_data["id"] = asset_id if make_obj: - asset_data["event_resolution"] = timedelta( - minutes=int(asset_data["event_resolution"]) + asset = GenericAsset(**asset_data) # TODO: use schema? + asset.generic_asset_type = GenericAssetType.query.get( + asset.generic_asset_type_id ) - return Asset(**asset_data) - asset_data["event_resolution"] = asset_data["event_resolution"].seconds / 60 + if "id" in asset_data: + expunge_asset() + asset.sensors = Sensor.query.filter( + Sensor.generic_asset_id == asset_data["id"] + ).all() + expunge_asset() + return asset return asset_data class AssetCrudUI(FlaskView): """ - These views help us offering a Jinja2-based UI. + These views help us offer a Jinja2-based UI. The main focus of the logic is the API, so these views simply call the API functions, and deal with the response. - Some new functionality, like fetching users and markets, is added here. + Some new functionality, like fetching accounts and asset types, is added here. """ route_base = "/assets" @@ -157,27 +148,39 @@ class AssetCrudUI(FlaskView): def index(self, msg=""): """/assets""" get_assets_response = InternalApi().get( - url_for("flexmeasures_api_v2_0.get_assets") + url_for("AssetAPI:index"), query={"account_id": current_user.account_id} ) assets = [ process_internal_api_response(ad, make_obj=True) for ad in get_assets_response.json() ] return render_flexmeasures_template( - "crud/assets.html", assets=assets, message=msg + "crud/assets.html", account=current_user.account, assets=assets, message=msg ) @login_required - def owned_by(self, owner_id: str): - """/assets/owned_by/<owner_id>""" + def owned_by(self, account_id: str): + """/assets/owned_by/<account_id>""" + msg = "" get_assets_response = InternalApi().get( - url_for("flexmeasures_api_v2_0.get_assets"), query={"owner_id": owner_id} + url_for("AssetAPI:index"), + query={"account_id": account_id}, + do_not_raise_for=[404], + ) + if get_assets_response.status_code == 404: + assets = [] + msg = f"Account {account_id} unknown." 
+ else: + assets = [ + process_internal_api_response(ad, make_obj=True) + for ad in get_assets_response.json() + ] + return render_flexmeasures_template( + "crud/assets.html", + account=Account.query.get(account_id), + assets=assets, + msg=msg, ) - assets = [ - process_internal_api_response(ad, make_obj=True) - for ad in get_assets_response.json() - ] - return render_flexmeasures_template("crud/assets.html", assets=assets) @login_required def get(self, id: str): @@ -196,9 +199,7 @@ def get(self, id: str): mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""), ) - get_asset_response = InternalApi().get( - url_for("flexmeasures_api_v2_0.get_asset", id=id) - ) + get_asset_response = InternalApi().get(url_for("AssetAPI:fetch_one", id=id)) asset_dict = get_asset_response.json() asset_form = with_options(AssetForm()) @@ -206,7 +207,7 @@ def get(self, id: str): asset = process_internal_api_response(asset_dict, int(id), make_obj=True) asset_form.process(data=process_internal_api_response(asset_dict)) - latest_measurement_time_str, asset_plot_html = get_latest_power_as_plot(asset) + latest_measurement_time_str, asset_plot_html = _get_latest_power_plot(asset) return render_flexmeasures_template( "crud/asset.html", asset=asset, @@ -223,36 +224,32 @@ def post(self, id: str): Most of the code deals with creating a user for the asset if no existing is chosen. """ - asset: Asset = None + asset: GenericAsset = None error_msg = "" if id == "create": asset_form = with_options(NewAssetForm()) - owner, owner_error = set_owner(asset_form) - market, market_error = set_market(asset_form) - - if asset_form.asset_type_name.data == "none chosen": - asset_form.asset_type_name.data = "" + account, account_error = _set_account(asset_form) + asset_type, asset_type_error = _set_asset_type(asset_form) form_valid = asset_form.validate_on_submit() # Fill up the form with useful errors for the user - if owner_error is not None: + if account_error is not None: form_valid = False - asset_form.owner_id.errors.append(owner_error) - if market_error is not None: + asset_form.account_id.errors.append(account_error) + if asset_type_error is not None: form_valid = False - asset_form.market_id.errors.append(market_error) + asset_form.generic_asset_type_id.errors.append(asset_type_error) # Create new asset or return the form for new assets with a message - if form_valid and owner is not None and market is not None: + if form_valid and asset_type is not None: post_asset_response = InternalApi().post( - url_for("flexmeasures_api_v2_0.post_assets"), - args=asset_form.to_json(for_posting=True), + url_for("AssetAPI:post"), + args=asset_form.to_json(), do_not_raise_for=[400, 422], ) - if post_asset_response.status_code in (200, 201): asset_dict = post_asset_response.json() asset = process_internal_api_response( @@ -282,26 +279,28 @@ def post(self, id: str): else: asset_form = with_options(AssetForm()) if not asset_form.validate_on_submit(): - asset = Asset.query.get(id) - latest_measurement_time_str, asset_plot_html = get_latest_power_as_plot( + asset = GenericAsset.query.get(id) + latest_measurement_time_str, asset_plot_html = _get_latest_power_plot( asset ) # Display the form data, but set some extra data which the page wants to show. 
- asset_info = asset_form.data.copy() + asset_info = asset_form.to_json() asset_info["id"] = id - asset_info["owner_id"] = asset.owner_id - asset_info["entity_address"] = asset.entity_address + asset_info["account_id"] = asset.account_id + asset = process_internal_api_response( + asset_info, int(id), make_obj=True + ) return render_flexmeasures_template( "crud/asset.html", asset_form=asset_form, - asset=asset_info, + asset=asset, msg="Cannot edit asset.", latest_measurement_time_str=latest_measurement_time_str, asset_plot_html=asset_plot_html, mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""), ) patch_asset_response = InternalApi().patch( - url_for("flexmeasures_api_v2_0.patch_asset", id=id), + url_for("AssetAPI:patch", id=id), args=asset_form.to_json(), do_not_raise_for=[400, 422], ) @@ -317,9 +316,9 @@ def post(self, id: str): ) msg = "Cannot edit asset." asset_form.process_api_validation_errors(patch_asset_response.json()) - asset = Asset.query.get(id) + asset = GenericAsset.query.get(id) - latest_measurement_time_str, asset_plot_html = get_latest_power_as_plot(asset) + latest_measurement_time_str, asset_plot_html = _get_latest_power_plot(asset) return render_flexmeasures_template( "crud/asset.html", asset=asset, @@ -333,45 +332,66 @@ @login_required def delete_with_data(self, id: str): """Delete via /assets/delete_with_data/<id>""" - InternalApi().delete( - url_for("flexmeasures_api_v2_0.delete_asset", id=id), - ) + InternalApi().delete(url_for("AssetAPI:delete", id=id)) return self.index( msg=f"Asset {id} and assorted meter readings / forecasts have been deleted." ) -def set_owner(asset_form: NewAssetForm) -> Tuple[Optional[User], Optional[str]]: - """Set a user as owner for the to-be-created asset. - Return the user (if available and an error message)""" - owner = None - owner_error = None +def _set_account(asset_form: NewAssetForm) -> Tuple[Optional[Account], Optional[str]]: + """Set an account for the to-be-created asset. + Return the account (if available) and an error message.""" + account = None + account_error = None - if asset_form.owner_id.data == -1: - owner_error = "Pick an existing owner." - else: - owner = User.query.filter_by(id=int(asset_form.owner_id.data)).one_or_none() + if asset_form.account_id.data == -1: + if current_user.has_role(ADMIN_ROLE): + return None, None # Account can be None (public asset) + else: + account_error = "Please pick an existing account." - if owner: - asset_form.owner_id.data = owner.id + account = Account.query.filter_by(id=int(asset_form.account_id.data)).one_or_none() + + if account: + asset_form.account_id.data = account.id else: - current_app.logger.error(owner_error) - return owner, owner_error + current_app.logger.error(account_error) + return account, account_error -def set_market(asset_form: NewAssetForm) -> Tuple[Optional[Market], Optional[str]]: - """Set a market for the to-be-created asset. - Return the market (if available) and an error message.""" - market = None - market_error = None + +def _set_asset_type( + asset_form: NewAssetForm, +) -> Tuple[Optional[GenericAssetType], Optional[str]]: + """Set an asset type for the to-be-created asset. + Return the asset type (if available) and an error message.""" + asset_type = None + asset_type_error = None - if int(asset_form.market_id.data) == -1: - market_error = "Pick an existing market." + if int(asset_form.generic_asset_type_id.data) == -1: + asset_type_error = "Pick an existing asset type." 
else: - market = Market.query.filter_by(id=int(asset_form.market_id.data)).one_or_none() + asset_type = GenericAssetType.query.filter_by( + id=int(asset_form.generic_asset_type_id.data) + ).one_or_none() - if market: - asset_form.market_id.data = market.id + if asset_type: + asset_form.generic_asset_type_id.data = asset_type.id + else: + current_app.logger.error(asset_type_error) + return asset_type, asset_type_error + + +def _get_latest_power_plot(asset: GenericAsset) -> Tuple[str, str]: + power_sensor: Optional[Sensor] = None + if asset._sa_instance_state.transient: + sensors = Sensor.query.filter(Sensor.generic_asset_id == asset.id).all() + else: + sensors = asset.sensors + for sensor in sensors: + if is_power_unit(sensor.unit): + power_sensor = sensor + break + if power_sensor is None: + return "", "" else: - current_app.logger.error(market_error) - return market, market_error + return get_latest_power_as_plot(power_sensor) diff --git a/flexmeasures/ui/templates/admin/logged_in_user.html b/flexmeasures/ui/templates/admin/logged_in_user.html index e4fce8949..31246fbb0 100644 --- a/flexmeasures/ui/templates/admin/logged_in_user.html +++ b/flexmeasures/ui/templates/admin/logged_in_user.html @@ -51,26 +51,26 @@

Overview for logged-in user: {{ logged_in_user.username }}

- Time Zone + Assets in account - {{logged_in_user.timezone }} + {{ num_assets }} - Last login was + Time Zone - {{ logged_in_user.last_login_at | localized_datetime }} + {{logged_in_user.timezone }} - Assets owned + Last login was - {{ num_assets }} + {{ logged_in_user.last_login_at | localized_datetime }} diff --git a/flexmeasures/ui/templates/crud/asset.html b/flexmeasures/ui/templates/crud/asset.html index 0762524eb..5a19ef731 100644 --- a/flexmeasures/ui/templates/crud/asset.html +++ b/flexmeasures/ui/templates/crud/asset.html @@ -2,7 +2,7 @@ {% set active_page = "assets" %} -{% block title %} {{asset.display_name}} {% endblock %} +{% block title %} {{asset.name}} {% endblock %} {% block divs %} @@ -46,77 +46,19 @@
-

Edit asset {{ asset.display_name }}

+

Edit asset {{ asset.name }}

- (Owned by {{ asset.owner_id | username }}) + (Owned by account: {{ asset.account_id | accountname }})
- {{ asset_form.display_name.label(class="col-sm-6 control-label") }} + {{ asset_form.name.label(class="col-sm-6 control-label") }}
- {{ asset_form.display_name(class_="form-control") }} - {% for error in asset_form.errors.display_name %} + {{ asset_form.name(class_="form-control") }} + {% for error in asset_form.errors.name %} [{{error}}] {% endfor %}
-
- {{ asset_form.capacity_in_mw.label(class="col-sm-6 control-label") }} -
- {{ asset_form.capacity_in_mw(class_="form-control") }} - {% for error in asset_form.errors.capacity_in_mw %} - [{{error}}] - {% endfor %} -
-
-
- {{ asset_form.unit.label(class="col-sm-6 control-label") }} -
- {{ asset_form.unit(class_="form-control") }} - {% for error in asset_form.errors.unit %} - [{{error}}] - {% endfor %} -
-
-
- {{ asset_form.event_resolution.label(class="col-sm-6 control-label") }} -
- {{ asset_form.event_resolution(class_="form-control") }} - {% for error in asset_form.errors.event_resolution %} - [{{error}}] - {% endfor %} -
-
- {% if asset.asset_type_name == "battery" %} -
- {{ asset_form.min_soc_in_mwh.label(class="col-sm-6 control-label") }} -
- {{ asset_form.min_soc_in_mwh(class_="form-control") }} - {% for error in asset_form.errors.min_soc_in_mwh %} - [{{error}}] - {% endfor %} -
-
-
- {{ asset_form.max_soc_in_mwh.label(class="col-sm-6 control-label") }} -
- {{ asset_form.max_soc_in_mwh(class_="form-control") }} - {% for error in asset_form.errors.max_soc_in_mwh %} - [{{error}}] - {% endfor %} -
-
- -
- -
- - -
-
- {% endif %} -
{{ asset_form.latitude.label(class="col-sm-6 control-label") }}
@@ -136,14 +78,19 @@

Edit asset {{ asset.display_name }}

- {{ asset_form.market_id.label(class="col-sm-6 control-label") }} -
- {{ asset_form.market_id(class_="form-control") }} - {% for error in asset_form.errors.market_id %} - [{{error}}] - {% endfor %} -
+ +
+ +
+
+ +
+ +
+
@@ -161,24 +108,23 @@

API info for this asset

- +
- -
-
-
- -
- +
+ +

Latest state

+ {% if asset_plot_html %} (At {{ latest_measurement_time_str | safe}}) {{ asset_plot_html | safe}} + {% else %} + No last state known (yet). + {% endif %}

Location

@@ -188,6 +134,47 @@

Location

+ +
+
+ +

All sensors for this asset

+ + + + + + + + + + + + {% for sensor in asset.sensors: %} + + + + + + + + {% endfor %} + +
NameUnitResolutionEntity addressData
+ {{ sensor.name }} + + {{ sensor.unit }} + + {{ sensor.event_resolution | naturalized_timedelta }} + + {{ sensor.entity_address }} + + View plot +
+
+
+ + @@ -209,7 +196,7 @@

Location

// create marker var asset_icon = new L.DivIcon({ className: 'map-icon', - html: '', + html: '', iconSize: [100, 100], // size of the icon iconAnchor: [50, 50], // point of the icon which will correspond to marker's location popupAnchor: [0, -50] // point from which the popup should open relative to the iconAnchor diff --git a/flexmeasures/ui/templates/crud/asset_new.html b/flexmeasures/ui/templates/crud/asset_new.html index 6db6773e9..2d28e02f8 100644 --- a/flexmeasures/ui/templates/crud/asset_new.html +++ b/flexmeasures/ui/templates/crud/asset_new.html @@ -26,64 +26,28 @@

Creating a new asset

-            {{ asset_form.display_name.label(class="col-sm-6 control-label") }}
+            {{ asset_form.name.label(class="col-sm-6 control-label") }}
-                {{ asset_form.display_name(class_="form-control") }}
-                {% for error in asset_form.errors.display_name %}
+                {{ asset_form.name(class_="form-control") }}
+                {% for error in asset_form.errors.name %}
                 [{{error}}]
                 {% endfor %}
-            {{ asset_form.asset_type_name.label(class="col-sm-6 control-label") }}
+            {{ asset_form.generic_asset_type_id.label(class="col-sm-6 control-label")}}
-                {{ asset_form.asset_type_name(class_="form-control") }}
-                {% for error in asset_form.errors.asset_type_name %}
+                {{ asset_form.generic_asset_type_id(class_="form-control") }}
+                {% for error in asset_form.errors.generic_asset_type_id%}
                 [{{error}}]
                 {% endfor %}
-            {{ asset_form.market_id.label(class="col-sm-6 control-label")}}
+            {{ asset_form.account_id.label(class="col-sm-6 control-label") }}
-                {{ asset_form.market_id(class_="form-control") }}
-                {% for error in asset_form.errors.market_id%}
-                [{{error}}]
-                {% endfor %}
-
-
-
-            {{ asset_form.owner_id.label(class="col-sm-6 control-label") }}
-
-                {{ asset_form.owner_id(class_="form-control") }}
-                {% for error in asset_form.errors.owner_id %}
-                [{{error}}]
-                {% endfor %}
-
-
-
-            {{ asset_form.capacity_in_mw.label(class="col-sm-6 control-label") }}
-
-                {{ asset_form.capacity_in_mw(class_="form-control") }}
-                {% for error in asset_form.errors.capacity_in_mw %}
-                [{{error}}]
-                {% endfor %}
-
-
-
-            {{ asset_form.unit.label(class="col-sm-6 control-label") }}
-
-                {{ asset_form.unit(class_="form-control") }}
-                {% for error in asset_form.errors.unit %}
-                [{{error}}]
-                {% endfor %}
-
-
-
-            {{ asset_form.event_resolution.label(class="col-sm-6 control-label") }}
-
-                {{ asset_form.event_resolution(class_="form-control") }}
-                {% for error in asset_form.errors.event_resolution %}
+                {{ asset_form.account_id(class_="form-control") }}
+                {% for error in asset_form.errors.account_id %}
                 [{{error}}]
                 {% endfor %}
@@ -106,42 +70,6 @@

Creating a new asset

{% endfor %}
-
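For orientation, the new-asset template above now renders `name`, `generic_asset_type_id` and `account_id` fields (plus `latitude` and `longitude`). A minimal WTForms sketch of such a form — the field names are taken from the template, while the field types and labels are assumptions (the real form class lives in flexmeasures/ui/crud/assets.py, not shown here):

    from flask_wtf import FlaskForm
    from wtforms import DecimalField, SelectField, StringField


    class NewAssetForm(FlaskForm):
        # Field names mirror what the template renders; types/labels are assumed.
        name = StringField("Name")
        generic_asset_type_id = SelectField("Asset type", coerce=int)
        account_id = SelectField("Account", coerce=int)
        latitude = DecimalField("Latitude", places=4)
        longitude = DecimalField("Longitude", places=4)
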
diff --git a/flexmeasures/ui/templates/crud/assets.html b/flexmeasures/ui/templates/crud/assets.html index a44e932c6..4d1e6b8f9 100644 --- a/flexmeasures/ui/templates/crud/assets.html +++ b/flexmeasures/ui/templates/crud/assets.html @@ -10,18 +10,15 @@
-

All assets

+

All assets owned by account {{account.name}}

- - - - - + + - - - diff --git a/flexmeasures/ui/templates/views/sensors.html b/flexmeasures/ui/templates/views/sensors.html index c71387d9f..78a971c7d 100644 --- a/flexmeasures/ui/templates/views/sensors.html +++ b/flexmeasures/ui/templates/views/sensors.html @@ -1,8 +1,8 @@ {% extends "base.html" %} -{% set active_page = "assets" %} +{% set active_page = "sensors" %} -{% block title %} Assets {% endblock %} +{% block title %} Sensor data {% endblock %} {% block divs %} diff --git a/flexmeasures/ui/tests/conftest.py b/flexmeasures/ui/tests/conftest.py index 2d12f5e88..a3bc24e30 100644 --- a/flexmeasures/ui/tests/conftest.py +++ b/flexmeasures/ui/tests/conftest.py @@ -1,7 +1,5 @@ import pytest -from flask_security.utils import hash_password - from flexmeasures.data.services.users import create_user from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.weather import WeatherSensor, WeatherSensorType @@ -40,13 +38,15 @@ def setup_ui_test_data( """ Create another prosumer, without data, and an admin Also, a weather sensor (and sensor type). + + TODO: review if any of these are really needed (might be covered now by main conftest) """ print("Setting up data for UI tests on %s" % db.engine) create_user( username="Site Admin", email="flexmeasures-admin@seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts["Prosumer"].name, user_roles=dict(name="admin", description="A site admin."), ) @@ -54,7 +54,7 @@ def setup_ui_test_data( test_user_ui = create_user( username=" Test Prosumer User UI", email="test_user_ui@seita.nl", - password=hash_password("testtest"), + password="testtest", account_name=setup_accounts["Prosumer"].name, ) asset = Asset( diff --git a/flexmeasures/ui/tests/test_asset_crud.py b/flexmeasures/ui/tests/test_asset_crud.py index 094b337a5..d034ce37a 100644 --- a/flexmeasures/ui/tests/test_asset_crud.py +++ b/flexmeasures/ui/tests/test_asset_crud.py @@ -11,9 +11,11 @@ The real logic tests are done in the api package, which is also the better place for that. 
""" +api_path_assets = "http://localhost//api/dev/generic_assets/" + def test_assets_page_empty(db, client, requests_mock, as_prosumer_user1): - requests_mock.get("http://localhost//api/v2_0/assets", status_code=200, json={}) + requests_mock.get(f"{api_path_assets}?account_id=1", status_code=200, json={}) asset_index = client.get(url_for("AssetCrudUI:index"), follow_redirects=True) assert asset_index.status_code == 200 @@ -24,16 +26,16 @@ def test_assets_page_nonempty( ): mock_assets = mock_asset_response(multiple=True) requests_mock.get( - "http://localhost//api/v2_0/assets", status_code=200, json=mock_assets + f"{api_path_assets}?account_id=1", status_code=200, json=mock_assets ) if use_owned_by: asset_index = client.get( - url_for("AssetCrudUI:owned_by", owner_id=mock_assets[0]["owner_id"]) + url_for("AssetCrudUI:owned_by", account_id=mock_assets[0]["account_id"]) ) else: asset_index = client.get(url_for("AssetCrudUI:index")) for asset in mock_assets: - assert asset["display_name"].encode() in asset_index.data + assert asset["name"].encode() in asset_index.data def test_new_asset_page(client, setup_assets, as_admin): @@ -47,27 +49,21 @@ def test_asset_page(db, client, setup_assets, requests_mock, as_prosumer_user1): asset = user.assets[0] db.session.expunge(user) mock_asset = mock_asset_response(as_list=False) - mock_asset["capacity_in_mw"] = asset.capacity_in_mw mock_asset["latitude"] = asset.latitude mock_asset["longitude"] = asset.longitude - requests_mock.get( - f"http://localhost//api/v2_0/asset/{asset.id}", status_code=200, json=mock_asset - ) + requests_mock.get(f"{api_path_assets}{asset.id}", status_code=200, json=mock_asset) asset_page = client.get( url_for("AssetCrudUI:get", id=asset.id), follow_redirects=True ) - assert ("Edit asset %s" % mock_asset["display_name"]).encode() in asset_page.data - assert str(mock_asset["capacity_in_mw"]).encode() in asset_page.data + assert ("Edit asset %s" % mock_asset["name"]).encode() in asset_page.data assert str(mock_asset["latitude"]).encode() in asset_page.data assert str(mock_asset["longitude"]).encode() in asset_page.data def test_edit_asset(db, client, setup_assets, requests_mock, as_admin): mock_asset = mock_asset_response(as_list=False) - requests_mock.patch( - "http://localhost//api/v2_0/asset/1", status_code=200, json=mock_asset - ) + requests_mock.patch(f"{api_path_assets}1", status_code=200, json=mock_asset) response = client.post( url_for("AssetCrudUI:post", id=1), follow_redirects=True, @@ -75,7 +71,7 @@ def test_edit_asset(db, client, setup_assets, requests_mock, as_admin): ) assert response.status_code == 200 assert b"Editing was successful" in response.data - assert mock_asset["display_name"] in str(response.data) + assert mock_asset["name"] in str(response.data) assert str(mock_asset["latitude"]) in str(response.data) assert str(mock_asset["longitude"]) in str(response.data) @@ -83,10 +79,8 @@ def test_edit_asset(db, client, setup_assets, requests_mock, as_admin): def test_add_asset(db, client, setup_assets, requests_mock, as_admin): """Add a new asset""" user = find_user_by_email("test_prosumer_user@seita.nl") - mock_asset = mock_asset_response(owner_id=user.id, as_list=False) - requests_mock.post( - "http://localhost//api/v2_0/assets", status_code=201, json=mock_asset - ) + mock_asset = mock_asset_response(account_id=user.account.id, as_list=False) + requests_mock.post(api_path_assets, status_code=201, json=mock_asset) response = client.post( url_for("AssetCrudUI:post", id="create"), follow_redirects=True, @@ -95,15 
+89,15 @@ def test_add_asset(db, client, setup_assets, requests_mock, as_admin): assert response.status_code == 200 # response is HTML form assert "html" in response.content_type assert b"Creation was successful" in response.data - assert mock_asset["display_name"] in str(response.data) + assert mock_asset["name"] in str(response.data) assert str(mock_asset["latitude"]) in str(response.data) assert str(mock_asset["longitude"]) in str(response.data) def test_delete_asset(client, db, requests_mock, as_admin): """Delete an asset""" - requests_mock.delete("http://localhost//api/v2_0/asset/1", status_code=204, json={}) - requests_mock.get("http://localhost//api/v2_0/assets", status_code=200, json={}) + requests_mock.delete(f"{api_path_assets}1", status_code=204, json={}) + requests_mock.get(api_path_assets, status_code=200, json={}) response = client.get( url_for("AssetCrudUI:delete_with_data", id=1), follow_redirects=True, diff --git a/flexmeasures/ui/tests/test_views.py b/flexmeasures/ui/tests/test_views.py index 455ba19eb..5f858514e 100644 --- a/flexmeasures/ui/tests/test_views.py +++ b/flexmeasures/ui/tests/test_views.py @@ -30,7 +30,11 @@ def test_portfolio_responds(client, setup_assets, as_prosumer_user1): def test_assets_responds(client, requests_mock, as_prosumer_user1): - requests_mock.get("http://localhost//api/v2_0/assets", status_code=200, json={}) + requests_mock.get( + "http://localhost//api/dev/generic_assets/?account_id=1", + status_code=200, + json={}, + ) assets_page = client.get(url_for("AssetCrudUI:index"), follow_redirects=True) assert assets_page.status_code == 200 assert b"All assets" in assets_page.data @@ -60,12 +64,3 @@ def test_analytics_responds(db, client, setup_assets, as_prosumer_user1): def test_logout(client, as_prosumer_user1): logout_response = logout(client) assert b"Please log in" in logout_response.data - - -""" TODO https://trello.com/c/GjsWgLOE/226-load-docs-in-bvpui-and-put-it-inside-based-template -def test_docs_responds(app, authable, client): - login(client, "wind@seita.nl", "wind") - dashboard = client.get(url_for("flexmeasures_ui.docs_view"), follow_redirects=True) - assert dashboard.status_code == 200 - assert b"Control actions" in dashboard.data -""" diff --git a/flexmeasures/ui/tests/utils.py b/flexmeasures/ui/tests/utils.py index 5e0a47d74..1f2e0c747 100644 --- a/flexmeasures/ui/tests/utils.py +++ b/flexmeasures/ui/tests/utils.py @@ -19,31 +19,23 @@ def logout(client): def mock_asset_response( asset_id: int = 1, - owner_id: int = 3, - market_id: int = 1, + account_id: int = 1, as_list: bool = True, multiple: bool = False, ) -> dict: asset = dict( id=asset_id, name="TestAsset", - display_name="New Test Asset", - asset_type_name="wind", - market_id=int(market_id), - owner_id=int(owner_id), - capacity_in_mw=100, + generic_asset_type_id=1, + account_id=int(account_id), latitude=70.4, longitude=30.9, - min_soc_in_mwh=0, - max_soc_in_mwh=0, - soc_in_mwh=0, - event_resolution=22, # "PT15M", ) if as_list: asset_list = [asset] if multiple: asset2 = copy.deepcopy(asset) - asset2["capacity_in_mw"] = 200 + asset2["name"] = "TestAsset2" asset_list.append(asset2) return asset_list return asset @@ -80,5 +72,5 @@ def mock_user_response( def mock_api_data_as_form_input(api_data: dict) -> dict: form_input = copy.deepcopy(api_data) - form_input["owner"] = api_data["owner_id"] + form_input["account"] = api_data["account_id"] return form_input diff --git a/flexmeasures/ui/utils/view_utils.py b/flexmeasures/ui/utils/view_utils.py index d58a8b15d..b143b7d4d 100644 --- 
a/flexmeasures/ui/utils/view_utils.py +++ b/flexmeasures/ui/utils/view_utils.py @@ -15,7 +15,7 @@ from flexmeasures.auth.policy import ADMIN_ROLE from flexmeasures.utils import time_utils from flexmeasures.ui import flexmeasures_ui -from flexmeasures.data.models.user import User +from flexmeasures.data.models.user import User, Account from flexmeasures.data.models.assets import Asset from flexmeasures.data.models.markets import Market from flexmeasures.data.models.weather import WeatherSensorType @@ -369,3 +369,12 @@ def username(user_id) -> str: return "" else: return user.username + + +def accountname(account_id) -> str: + account = Account.query.get(account_id) + if account is None: + current_app.logger.warning(f"Could not find account with id {account_id}") + return "" + else: + return account.name From 4f0956afee70d975a85509ac7708423569aec737 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Wed, 5 Jan 2022 21:24:10 +0100 Subject: [PATCH 28/46] Small addendum to 290 (#301) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix typo Signed-off-by: Nicolas Höning * stop using click.echo in util code Signed-off-by: Nicolas Höning --- documentation/dev/note-on-datamodel-transition.rst | 2 +- flexmeasures/data/utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/documentation/dev/note-on-datamodel-transition.rst b/documentation/dev/note-on-datamodel-transition.rst index d50bb426e..37d994ae7 100644 --- a/documentation/dev/note-on-datamodel-transition.rst +++ b/documentation/dev/note-on-datamodel-transition.rst @@ -67,4 +67,4 @@ Project 9 was implemented, which moved a lot of structure over, as well as actua We are now close to being able to deprecate the old database models and route the API to the new model (see project 11). The API for assets is still in place, but the new one is already working (at /api/dev/generic_assets) and is powering what is shown in the UI. -We take care to support people on the old data mode so the transition will be as smooth as possible, as we said above. One part of this is that the ``flexmeasures db upgrade`` command copies your data to the new model. Also, creating new data (e.g. old-style assets) creates new-style data (e.g. assets/sensors) automatically. However, some edge cases are not supported in this way. For instance, edited asset meta data might have to be re-entered later. Feel free to contact us to discuss the transition if needed. \ No newline at end of file +We take care to support people on the old data model so the transition will be as smooth as possible, as we said above. One part of this is that the ``flexmeasures db upgrade`` command copies your data to the new model. Also, creating new data (e.g. old-style assets) creates new-style data (e.g. assets/sensors) automatically. However, some edge cases are not supported in this way. For instance, edited asset meta data might have to be re-entered later. Feel free to contact us to discuss the transition if needed. 
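The next diff applies the second part of this patch: util code logs via the Flask app logger instead of click.echo, so the message is also emitted when the code runs outside a CLI context. A minimal sketch of the pattern, with a hypothetical function name (the message format is taken from the diff below):

    from flask import current_app


    def log_new_data_source(data_source_repr: str, data_source_type: str):
        # Inside an app context, prefer the app logger over click.echo,
        # so the message also appears when called from web/API code.
        current_app.logger.info(
            f'Session updated with new {data_source_type} data source "{data_source_repr}".'
        )
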
diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py
index b5096fd61..228f7d566 100644
--- a/flexmeasures/data/utils.py
+++ b/flexmeasures/data/utils.py
@@ -1,6 +1,5 @@
 from typing import List, Optional, Union

-import click
 from flask import current_app
 from timely_beliefs import BeliefsDataFrame

@@ -44,7 +43,7 @@ def get_data_source(
     )
     db.session.add(data_source)
     db.session.flush()  # populate the primary key attributes (like id) without committing the transaction
-    click.echo(
+    current_app.logger.info(
         f'Session updated with new {data_source_type} data source "{data_source.__repr__()}".'
     )
     return data_source

From dcbdaf3022394d1868a7c2f0f37a6a21710cb51b Mon Sep 17 00:00:00 2001
From: "F.N. Claessen"
Date: Thu, 6 Jan 2022 14:22:51 +0100
Subject: [PATCH 29/46] Merge database migrations

Signed-off-by: F.N. Claessen
---
 .../migrations/versions/c1d316c60985_merge.py | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 flexmeasures/data/migrations/versions/c1d316c60985_merge.py

diff --git a/flexmeasures/data/migrations/versions/c1d316c60985_merge.py b/flexmeasures/data/migrations/versions/c1d316c60985_merge.py
new file mode 100644
index 000000000..6eb97c690
--- /dev/null
+++ b/flexmeasures/data/migrations/versions/c1d316c60985_merge.py
@@ -0,0 +1,22 @@
+"""merge
+
+Revision ID: c1d316c60985
+Revises: a918360f7d63, e690d373a3d9
+Create Date: 2022-01-06 14:19:58.213432
+
+"""
+
+
+# revision identifiers, used by Alembic.
+revision = "c1d316c60985"
+down_revision = ("a918360f7d63", "e690d373a3d9")
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    pass
+
+
+def downgrade():
+    pass

From 57d31e394c9d2900728cf0ec40f78a5c35e42480 Mon Sep 17 00:00:00 2001
From: "F.N. Claessen"
Date: Thu, 6 Jan 2022 16:39:58 +0100
Subject: [PATCH 30/46] Bump timely-beliefs dependency to get rid of false
 deprecation warnings

Signed-off-by: F.N. Claessen
---
 requirements/app.in  | 2 +-
 requirements/app.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements/app.in b/requirements/app.in
index fa2947beb..4ca2352b9 100644
--- a/requirements/app.in
+++ b/requirements/app.in
@@ -34,7 +34,7 @@ netCDF4
 siphon
 tables
 timetomodel>=0.7.1
-timely-beliefs>=1.9.0
+timely-beliefs>=1.9.1
 python-dotenv
 # a backport, not needed in Python3.8
 importlib_metadata

diff --git a/requirements/app.txt b/requirements/app.txt
index 98f01eaaa..63114f2d6 100644
--- a/requirements/app.txt
+++ b/requirements/app.txt
@@ -356,7 +356,7 @@ tables==3.6.1
     # via -r requirements/app.in
 threadpoolctl==3.0.0
     # via scikit-learn
-timely-beliefs==1.9.0
+timely-beliefs==1.9.1
     # via -r requirements/app.in
 timetomodel==0.7.1
     # via -r requirements/app.in

From cc0ffa1c434597c211f2680f8eaf326d6bcd79c8 Mon Sep 17 00:00:00 2001
From: "F.N. Claessen"
Date: Thu, 6 Jan 2022 17:03:24 +0100
Subject: [PATCH 31/46] Bump timely-beliefs dependency given previously yanked
 release

Signed-off-by: F.N.
Claessen --- requirements/app.in | 2 +- requirements/app.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/app.in b/requirements/app.in index 4ca2352b9..fa17b25b7 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -34,7 +34,7 @@ netCDF4 siphon tables timetomodel>=0.7.1 -timely-beliefs>=1.9.1 +timely-beliefs>=1.9.2 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index 63114f2d6..a351b83e1 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -356,7 +356,7 @@ tables==3.6.1 # via -r requirements/app.in threadpoolctl==3.0.0 # via scikit-learn -timely-beliefs==1.9.1 +timely-beliefs==1.9.2 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in From 868f483cd7239ef55e574be9bf74338589df6fd1 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Thu, 6 Jan 2022 19:45:13 +0100 Subject: [PATCH 32/46] Skip autoscheduling if none of the posted values represent a state change (#303) Stop autoscheduling when API calls save nothing new to the database, thereby saving redundant computation. Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/api_utils.py | 7 +++++-- flexmeasures/data/utils.py | 6 ++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index e793c5117..2c5463df7 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -365,13 +365,16 @@ def save_and_enqueue( ) # Only enqueue forecasting jobs upon successfully saving new data - if status[:7] == "success": + if status[:7] == "success" and status != "success_but_nothing_new": enqueue_forecasting_jobs(forecasting_jobs) # Pick a response if status == "success": return request_processed() - elif status == "success_with_unchanged_beliefs_skipped": + elif status in ( + "success_with_unchanged_beliefs_skipped", + "success_but_nothing_new", + ): return already_received_and_successfully_processed() return invalid_replacement() diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 228f7d566..cb36a2357 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -80,6 +80,7 @@ def save_to_db( :returns: status string, one of the following: - 'success': all beliefs were saved - 'success_with_unchanged_beliefs_skipped': not all beliefs represented a state change + - 'success_but_nothing_new': no beliefs represented a state change """ # Convert to list @@ -89,6 +90,7 @@ def save_to_db( timed_values_list = data status = "success" + values_saved = 0 for timed_values in timed_values_list: if timed_values.empty: @@ -124,6 +126,10 @@ def save_to_db( if current_app.config.get("FLEXMEASURES_MODE", "") != "play" else True, ) + values_saved += len(timed_values) # Flush to bring up potential unique violations (due to attempting to replace beliefs) db.session.flush() + + if values_saved == 0: + status = "success_but_nothing_new" return status From 7a5cee3a690f5fae22cfc549823d29897f5522c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Fri, 7 Jan 2022 09:39:39 +0100 Subject: [PATCH 33/46] fix open weather map import: adapt weather sensor location access to new model (#304) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * adapt weather sensor location access to new model Signed-off-by: Nicolas Höning * small 
review improvement Signed-off-by: Nicolas Höning --- flexmeasures/data/scripts/grid_weather.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index 9c8e7282d..21679aa74 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -388,13 +388,13 @@ def save_forecasts_in_db( if weather_sensor is not None: # Complain if the nearest weather sensor is further away than 2 degrees if abs( - location[0] - weather_sensor.latitude + location[0] - weather_sensor.location[0] ) > max_degree_difference_for_nearest_weather_sensor or abs( - location[1] - weather_sensor.longitude + location[1] - weather_sensor.location[1] > max_degree_difference_for_nearest_weather_sensor ): raise Exception( - f"No sufficiently close weather sensor found (within 2 degrees distance) for type {flexmeasures_sensor_type}! We're looking for: {location}, closest available: ({weather_sensor.latitude}, {weather_sensor.longitude})" + f"No sufficiently close weather sensor found (within 2 degrees distance) for type {flexmeasures_sensor_type}! We're looking for: {location}, closest available: ({weather_sensor.location})" ) weather_sensors[flexmeasures_sensor_type] = weather_sensor else: From b5b65be650590fc0602164c1525f0bf5c68d1073 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Fri, 7 Jan 2022 11:03:41 +0100 Subject: [PATCH 34/46] black some docstrings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Nicolas Höning --- flexmeasures/auth/decorators.py | 4 ++-- flexmeasures/data/models/data_sources.py | 2 +- flexmeasures/data/models/generic_assets.py | 2 +- flexmeasures/data/services/asset_grouping.py | 2 +- flexmeasures/ui/crud/assets.py | 2 +- flexmeasures/ui/utils/plotting_utils.py | 2 +- flexmeasures/ui/views/__init__.py | 2 +- flexmeasures/ui/views/portfolio.py | 2 +- flexmeasures/utils/config_utils.py | 4 ++-- flexmeasures/utils/flexmeasures_inflection.py | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/flexmeasures/auth/decorators.py b/flexmeasures/auth/decorators.py index d783dbd6b..75af4c0ad 100644 --- a/flexmeasures/auth/decorators.py +++ b/flexmeasures/auth/decorators.py @@ -23,14 +23,14 @@ def roles_accepted(*roles): - """ As in Flask-Security, but also accept admin""" + """As in Flask-Security, but also accept admin""" if ADMIN_ROLE not in roles: roles = roles + (ADMIN_ROLE,) return roles_accepted_fs(roles) def roles_required(*roles): - """ As in Flask-Security, but wave through if user is admin""" + """As in Flask-Security, but wave through if user is admin""" if current_user and current_user.has_role(ADMIN_ROLE): roles = [] return roles_required_fs(*roles) diff --git a/flexmeasures/data/models/data_sources.py b/flexmeasures/data/models/data_sources.py index 9880e37f4..90c0d0574 100644 --- a/flexmeasures/data/models/data_sources.py +++ b/flexmeasures/data/models/data_sources.py @@ -48,7 +48,7 @@ def __init__( @property def label(self): - """ Human-readable label (preferably not starting with a capital letter so it can be used in a sentence). 
""" + """Human-readable label (preferably not starting with a capital letter so it can be used in a sentence).""" if self.type == "user": return f"data entered by user {self.user.username}" # todo: give users a display name elif self.type == "forecasting script": diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index ce90c900d..e33526e14 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -80,7 +80,7 @@ def __repr__(self): @property def asset_type(self) -> GenericAssetType: - """ This property prepares for dropping the "generic" prefix later""" + """This property prepares for dropping the "generic" prefix later""" return self.generic_asset_type account_id = db.Column( diff --git a/flexmeasures/data/services/asset_grouping.py b/flexmeasures/data/services/asset_grouping.py index fb9e6ce25..8d418ec3d 100644 --- a/flexmeasures/data/services/asset_grouping.py +++ b/flexmeasures/data/services/asset_grouping.py @@ -146,7 +146,7 @@ class AssetGroup: unique_asset_type_names: List[str] def __init__(self, name: str, asset_query: Optional[Query] = None): - """ The asset group name is either the name of an asset group or an individual asset. """ + """The asset group name is either the name of an asset group or an individual asset.""" if name is None or name == "": raise Exception("Empty asset (group) name passed (%s)" % name) self.name = name diff --git a/flexmeasures/ui/crud/assets.py b/flexmeasures/ui/crud/assets.py index 683cda0fb..1421d17d4 100644 --- a/flexmeasures/ui/crud/assets.py +++ b/flexmeasures/ui/crud/assets.py @@ -58,7 +58,7 @@ def validate_on_submit(self): return super().validate_on_submit() def to_json(self) -> dict: - """ turn form data into a JSON we can POST to our internal API """ + """turn form data into a JSON we can POST to our internal API""" data = copy.copy(self.data) data["longitude"] = float(data["longitude"]) data["latitude"] = float(data["latitude"]) diff --git a/flexmeasures/ui/utils/plotting_utils.py b/flexmeasures/ui/utils/plotting_utils.py index 88568bfc9..c01a73bfc 100644 --- a/flexmeasures/ui/utils/plotting_utils.py +++ b/flexmeasures/ui/utils/plotting_utils.py @@ -432,7 +432,7 @@ def create_graph( # noqa: C901 def make_datasource_from(data: pd.DataFrame, resolution: timedelta) -> ColumnDataSource: - """ Make a bokeh data source, which is for instance useful for the hover tool. 
""" + """Make a bokeh data source, which is for instance useful for the hover tool.""" # Set column names that our HoverTool can interpret (in case of multiple index levels, use the first one) data.index.names = ["x"] + data.index.names[1:] diff --git a/flexmeasures/ui/views/__init__.py b/flexmeasures/ui/views/__init__.py index 4614f48f9..a1980d413 100644 --- a/flexmeasures/ui/views/__init__.py +++ b/flexmeasures/ui/views/__init__.py @@ -21,6 +21,6 @@ @flexmeasures_ui.route("/docs") def docs_view(): - """ Render the Sphinx documentation """ + """Render the Sphinx documentation""" # Todo: render the docs with this nicer url and include the app's navigation menu return diff --git a/flexmeasures/ui/views/portfolio.py b/flexmeasures/ui/views/portfolio.py index 71816158c..5137c48f5 100644 --- a/flexmeasures/ui/views/portfolio.py +++ b/flexmeasures/ui/views/portfolio.py @@ -319,7 +319,7 @@ def mock_flex_action_in_main_figure(fig_profile: Figure): def get_flex_action_hour(h: int) -> datetime: - """ get the next hour from now on """ + """get the next hour from now on""" this_hour = time_utils.get_most_recent_hour() return [ dt diff --git a/flexmeasures/utils/config_utils.py b/flexmeasures/utils/config_utils.py index ae2e7da24..a17526bea 100644 --- a/flexmeasures/utils/config_utils.py +++ b/flexmeasures/utils/config_utils.py @@ -63,7 +63,7 @@ def check_app_env(env: Optional[str]): def read_config(app: Flask, custom_path_to_config: Optional[str]): - """Read configuration from various expected sources, complain if not setup correctly. """ + """Read configuration from various expected sources, complain if not setup correctly.""" check_app_env(app.env) @@ -147,7 +147,7 @@ def read_custom_config( def read_required_env_vars(app: Flask): - """ All required variables and the plugins can be set as env var""" + """All required variables and the plugins can be set as env var""" for var in required: app.config[var] = os.getenv(var, app.config.get(var, None)) diff --git a/flexmeasures/utils/flexmeasures_inflection.py b/flexmeasures/utils/flexmeasures_inflection.py index be1919bf8..0015b0e45 100644 --- a/flexmeasures/utils/flexmeasures_inflection.py +++ b/flexmeasures/utils/flexmeasures_inflection.py @@ -11,7 +11,7 @@ def capitalize(x: str, lower_case_remainder: bool = False) -> str: - """ Capitalize string with control over whether to lower case the remainder.""" + """Capitalize string with control over whether to lower case the remainder.""" if lower_case_remainder: return x.capitalize() return x[0].upper() + x[1:] From 569dbd56fca5333dedc3d80549704b5eef5e5072 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Fri, 7 Jan 2022 16:14:22 +0100 Subject: [PATCH 35/46] Stop saving unchanged weather forecasts (#305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * stop saving unchanged weather forecasts Signed-off-by: Nicolas Höning * Catch trivial aggregation case Signed-off-by: F.N. Claessen * print save_to_db status; rename variable for more clarity of current data model assumptions; refactor complex function Signed-off-by: Nicolas Höning Co-authored-by: F.N. 
Claessen --- flexmeasures/data/scripts/grid_weather.py | 102 ++++++++++++++-------- flexmeasures/data/services/time_series.py | 9 +- 2 files changed, 75 insertions(+), 36 deletions(-) diff --git a/flexmeasures/data/scripts/grid_weather.py b/flexmeasures/data/scripts/grid_weather.py index 21679aa74..e9db5ca01 100755 --- a/flexmeasures/data/scripts/grid_weather.py +++ b/flexmeasures/data/scripts/grid_weather.py @@ -9,6 +9,7 @@ from flask import Flask, current_app import requests import pytz +from timely_beliefs import BeliefsDataFrame from flexmeasures.utils.time_utils import as_server_time, get_timezone from flexmeasures.utils.geo_utils import compute_irradiance @@ -17,6 +18,7 @@ from flexmeasures.data.transactional import task_with_status_report from flexmeasures.data.models.data_sources import DataSource from flexmeasures.data.models.time_series import Sensor, TimedBelief +from flexmeasures.data.utils import save_to_db FILE_PATH_LOCATION = "/../raw_data/weather-forecasts" DATA_SOURCE_NAME = "OpenWeatherMap" @@ -335,6 +337,37 @@ def call_openweatherapi( return data["current"]["dt"], data["hourly"] +def find_weather_sensor_by_location_or_fail( + weather_sensor: Sensor, + location: Tuple[float, float], + max_degree_difference_for_nearest_weather_sensor: int, + flexmeasures_asset_type: str, +) -> Optional[Sensor]: + """ + Try to find a weather sensor of fitting type close by. + Complain if the nearest weather sensor is further away than some minimum degrees. + """ + weather_sensor: Optional[Sensor] = find_closest_sensor( + flexmeasures_asset_type, lat=location[0], lng=location[1] + ) + if weather_sensor is not None: + if abs( + location[0] - weather_sensor.location[0] + ) > max_degree_difference_for_nearest_weather_sensor or abs( + location[1] - weather_sensor.location[1] + > max_degree_difference_for_nearest_weather_sensor + ): + raise Exception( + f"No sufficiently close weather sensor found (within 2 degrees distance) for type {flexmeasures_asset_type}! We're looking for: {location}, closest available: ({weather_sensor.location})" + ) + else: + raise Exception( + "No weather sensor set up for this sensor type (%s)" + % flexmeasures_asset_type + ) + return weather_sensor + + def save_forecasts_in_db( api_key: str, locations: List[Tuple[float, float]], @@ -347,8 +380,8 @@ def save_forecasts_in_db( click.echo("[FLEXMEASURES] Getting weather forecasts:") click.echo("[FLEXMEASURES] Latitude, Longitude") click.echo("[FLEXMEASURES] -----------------------") - db_forecasts = [] - weather_sensors: dict = {} # keep track of the sensors to save lookups + weather_sensors: Dict[str, Sensor] = {} # keep track of the sensors to save lookups + db_forecasts: Dict[Sensor, List[TimedBelief]] = {} # collect beliefs per sensor for location in locations: click.echo("[FLEXMEASURES] %s, %s" % location) @@ -362,8 +395,10 @@ def save_forecasts_in_db( % time_of_api_call ) - # map sensor name in our db to sensor name/label in OWM response - sensor_name_mapping = dict( + # map asset type name in our db to sensor name/label in OWM response + # TODO: This assumes one asset per sensor in our database, should move to + # one weather station asset per location, with multiple sensors. + asset_type_to_OWM_sensor_mapping = dict( temperature="temp", wind_speed="wind_speed", radiation="clouds" ) @@ -377,35 +412,26 @@ def save_forecasts_in_db( "[FLEXMEASURES] Processing forecast for %s (horizon: %s) ..." 
% (fc_datetime, fc_horizon) ) - for flexmeasures_sensor_type in sensor_name_mapping.keys(): - needed_response_label = sensor_name_mapping[flexmeasures_sensor_type] + for flexmeasures_asset_type in asset_type_to_OWM_sensor_mapping.keys(): + needed_response_label = asset_type_to_OWM_sensor_mapping[ + flexmeasures_asset_type + ] if needed_response_label in fc: - weather_sensor = weather_sensors.get(flexmeasures_sensor_type, None) + weather_sensor = weather_sensors.get(flexmeasures_asset_type, None) if weather_sensor is None: - weather_sensor: Optional[Sensor] = find_closest_sensor( - flexmeasures_sensor_type, lat=location[0], lng=location[1] + weather_sensor = find_weather_sensor_by_location_or_fail( + weather_sensor, + location, + max_degree_difference_for_nearest_weather_sensor, + flexmeasures_asset_type, ) - if weather_sensor is not None: - # Complain if the nearest weather sensor is further away than 2 degrees - if abs( - location[0] - weather_sensor.location[0] - ) > max_degree_difference_for_nearest_weather_sensor or abs( - location[1] - weather_sensor.location[1] - > max_degree_difference_for_nearest_weather_sensor - ): - raise Exception( - f"No sufficiently close weather sensor found (within 2 degrees distance) for type {flexmeasures_sensor_type}! We're looking for: {location}, closest available: ({weather_sensor.location})" - ) - weather_sensors[flexmeasures_sensor_type] = weather_sensor - else: - raise Exception( - "No weather sensor set up for this sensor type (%s)" - % flexmeasures_sensor_type - ) + weather_sensors[flexmeasures_asset_type] = weather_sensor + if weather_sensor not in db_forecasts.keys(): + db_forecasts[weather_sensor] = [] fc_value = fc[needed_response_label] # the radiation is not available in OWM -> we compute it ourselves - if flexmeasures_sensor_type == "radiation": + if flexmeasures_asset_type == "radiation": fc_value = compute_irradiance( location[0], location[1], @@ -414,7 +440,7 @@ def save_forecasts_in_db( fc[needed_response_label] / 100.0, ) - db_forecasts.append( + db_forecasts[weather_sensor].append( TimedBelief( event_start=fc_datetime, belief_horizon=fc_horizon, @@ -431,12 +457,20 @@ def save_forecasts_in_db( ) click.echo("[FLEXMEASURES] %s" % msg) current_app.logger.warning(msg) - if len(db_forecasts) == 0: - # This is probably a serious problem - raise Exception( - "Nothing to put in the database was produced. That does not seem right..." - ) - db.session.bulk_save_objects(db_forecasts) + for sensor in db_forecasts.keys(): + click.echo(f"Saving {sensor.name} forecasts ...") + if len(db_forecasts[sensor]) == 0: + # This is probably a serious problem + raise Exception( + "Nothing to put in the database was produced. That does not seem right..." + ) + status = save_to_db(BeliefsDataFrame(db_forecasts[sensor])) + if status == "success_but_nothing_new": + current_app.logger.info( + "Done. These beliefs had already been saved before." + ) + elif status == "success_with_unchanged_beliefs_skipped": + current_app.logger.info("Done. 
Some beliefs had already been saved before.") def save_forecasts_as_json( diff --git a/flexmeasures/data/services/time_series.py b/flexmeasures/data/services/time_series.py index 5765b9525..e2832ea8e 100644 --- a/flexmeasures/data/services/time_series.py +++ b/flexmeasures/data/services/time_series.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Optional, Tuple, Union, Callable +from typing import Any, List, Dict, Optional, Tuple, Union, Callable from datetime import datetime, timedelta import inflect @@ -259,13 +259,18 @@ def convert_query_window_for_demo( return start, end -def aggregate_values(bdf_dict: Dict[str, tb.BeliefsDataFrame]) -> tb.BeliefsDataFrame: +def aggregate_values(bdf_dict: Dict[Any, tb.BeliefsDataFrame]) -> tb.BeliefsDataFrame: # todo: test this function rigorously, e.g. with empty bdfs in bdf_dict # todo: consider 1 bdf with beliefs from source A, plus 1 bdf with beliefs from source B -> 1 bdf with sources A+B # todo: consider 1 bdf with beliefs from sources A and B, plus 1 bdf with beliefs from source C. -> 1 bdf with sources A+B and A+C # todo: consider 1 bdf with beliefs from sources A and B, plus 1 bdf with beliefs from source C and D. -> 1 bdf with sources A+B, A+C, B+C and B+D # Relevant issue: https://github.com/SeitaBV/timely-beliefs/issues/33 + + # Nothing to aggregate + if len(bdf_dict) == 1: + return list(bdf_dict.values())[0] + unique_source_ids: List[int] = [] for bdf in bdf_dict.values(): unique_source_ids.extend(bdf.lineage.sources) From 82387ccd362c2dd5b922aa89673537f2f41350db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Tue, 11 Jan 2022 16:46:53 +0100 Subject: [PATCH 36/46] Commit session after calling new save_to_db function (#308) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * commit when using modern save_to_db and improve docstring Signed-off-by: Nicolas Höning * mention Seita's Github profile Signed-off-by: Nicolas Höning --- Readme.md | 1 + flexmeasures/api/common/utils/api_utils.py | 1 + flexmeasures/data/services/forecasting.py | 1 + flexmeasures/data/services/scheduling.py | 1 + flexmeasures/data/utils.py | 2 +- 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Readme.md b/Readme.md index 98c01a8ff..a01982e7e 100644 --- a/Readme.md +++ b/Readme.md @@ -29,6 +29,7 @@ FlexMeasures provides three core values: FlexMeasures is developed by [Seita BV](https://www.seita.nl) in The Netherlands. We made FlexMeasures freely available under the Apache2.0 licence. Please get in contact if you use FlexMeasures or are considering it. +See also [Seita's Github profile](https://github.com/SeitaBV), e.g. for FlexMeasures plugin examples. Head over to our [documentation](https://flexmeasures.readthedocs.io), e.g. the [getting started guide](https://flexmeasures.readthedocs.io/en/latest/getting-started.html). Or find more information on [FlexMeasures.io](https://flexmeasures.io). 
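The diffs below add an explicit commit after each save_to_db call. Since save_to_db only flushes the session, callers now end the transaction themselves; a minimal sketch of the resulting calling pattern (assuming `bdf` is a timely-beliefs BeliefsDataFrame already in scope):

    from flexmeasures.data import db
    from flexmeasures.data.utils import save_to_db

    # save_to_db flushes the session but does not commit,
    # so the caller commits (or rolls back) the transaction:
    status = save_to_db(bdf)
    db.session.commit()
    if status == "success_but_nothing_new":
        # nothing new was saved, so e.g. forecasting jobs need not be enqueued
        pass
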
diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index 2c5463df7..f60be2ba4 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -363,6 +363,7 @@ def save_and_enqueue( status = modern_save_to_db( data, save_changed_beliefs_only=save_changed_beliefs_only ) + db.session.commit() # Only enqueue forecasting jobs upon successfully saving new data if status[:7] == "success" and status != "success_but_nothing_new": diff --git a/flexmeasures/data/services/forecasting.py b/flexmeasures/data/services/forecasting.py index ad05358a6..eeb91ec40 100644 --- a/flexmeasures/data/services/forecasting.py +++ b/flexmeasures/data/services/forecasting.py @@ -246,6 +246,7 @@ def make_rolling_viewpoint_forecasts( ] bdf = tb.BeliefsDataFrame(ts_value_forecasts) save_to_db(bdf) + db.session.commit() return len(forecasts) diff --git a/flexmeasures/data/services/scheduling.py b/flexmeasures/data/services/scheduling.py index 13af125bd..8518e4d46 100644 --- a/flexmeasures/data/services/scheduling.py +++ b/flexmeasures/data/services/scheduling.py @@ -170,6 +170,7 @@ def make_schedule( ] # For consumption schedules, positive values denote consumption. For the db, consumption is negative bdf = tb.BeliefsDataFrame(ts_value_schedule) save_to_db(bdf) + db.session.commit() return True diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index cb36a2357..8a4b8cacd 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -55,7 +55,7 @@ def save_to_db( ) -> str: """Save the timed beliefs to the database. - NB Flushes the session. Best to keep transactions short. + Note: This function does not commit. It does, however, flush the session. Best to keep transactions short. We make the distinction between updating beliefs and replacing beliefs. From f7c6ab07302bdcd6b2b7ac8d3e4402781602c912 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 12 Jan 2022 11:12:19 +0100 Subject: [PATCH 37/46] Sensor charts load most recent beliefs by default (#307) This represents a significant speed-up for loading charts of sensors with many beliefs, such as charts for power actuators whose planned schedules are updated many times before actuation. Signed-off-by: F.N. 
Claessen --- flexmeasures/data/models/time_series.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 5513d5d0b..304ae613a 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -244,6 +244,7 @@ def chart( source: Optional[ Union[DataSource, List[DataSource], int, List[int], str, List[str]] ] = None, + most_recent_beliefs_only: bool = True, include_data: bool = False, dataset_name: Optional[str] = None, **kwargs, @@ -256,6 +257,7 @@ def chart( :param beliefs_after: only return beliefs formed after this datetime (inclusive) :param beliefs_before: only return beliefs formed before this datetime (inclusive) :param source: search only beliefs by this source (pass the DataSource, or its name or id) or list of sources + :param most_recent_beliefs_only: only return the most recent beliefs for each event from each source (minimum belief horizon) :param include_data: if True, include data in the chart, or if False, exclude data :param dataset_name: optionally name the dataset used in the chart (the default name is sensor_) """ @@ -283,6 +285,7 @@ def chart( event_ends_before=event_ends_before, beliefs_after=beliefs_after, beliefs_before=beliefs_before, + most_recent_beliefs_only=most_recent_beliefs_only, source=source, ) # Combine chart specs and data From 932034f7471820617ba9ce1c8736734d106ec14a Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 12 Jan 2022 22:15:00 +0100 Subject: [PATCH 38/46] Mobile friendly (responsive) charts of sensor data, and such charts can be requested with a custom width and height. Responsive sensor chart (#313) * Append to transforms possibly already coming out of the chart specs Signed-off-by: F.N. Claessen * Let chart specs derive title, quantity and unit from Sensor object Signed-off-by: F.N. Claessen * Derive bar width from sensor's event resolution Signed-off-by: F.N. Claessen * Fix attribute call Signed-off-by: F.N. Claessen * Responsive sensor charts: scales width to container, which is better for mobile. Also exposes width and height as overridable chart properties for getting charts by API. Signed-off-by: F.N. Claessen * Changelog entry Signed-off-by: F.N. Claessen --- documentation/changelog.rst | 1 + flexmeasures/api/dev/sensors.py | 2 ++ .../data/models/charts/belief_charts.py | 25 ++++++++++++++----- flexmeasures/data/models/charts/defaults.py | 21 +++++++++++++--- flexmeasures/data/models/time_series.py | 5 +--- flexmeasures/ui/templates/views/sensors.html | 9 +++++-- 6 files changed, 47 insertions(+), 16 deletions(-) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index be9094405..76d234000 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -11,6 +11,7 @@ v0.8.0 | November XX, 2021 New features ----------- * Charts with sensor data can be requested in one of the supported [`vega-lite themes `_] (incl. 
a dark theme) [see `PR #221 `_] +* Mobile friendly (responsive) charts of sensor data, and such charts can be requested with a custom width and height [see `PR #313 `_] * Schedulers take into account round-trip efficiency if set [see `PR #291 `_] Bugfixes diff --git a/flexmeasures/api/dev/sensors.py b/flexmeasures/api/dev/sensors.py index 38aa1fb06..2d19edf3f 100644 --- a/flexmeasures/api/dev/sensors.py +++ b/flexmeasures/api/dev/sensors.py @@ -29,6 +29,8 @@ class SensorAPI(FlaskView): "beliefs_before": AwareDateTimeField(format="iso", required=False), "include_data": fields.Boolean(required=False), "dataset_name": fields.Str(required=False), + "height": fields.Str(required=False), + "width": fields.Str(required=False), }, location="query", ) diff --git a/flexmeasures/data/models/charts/belief_charts.py b/flexmeasures/data/models/charts/belief_charts.py index 0dea2f611..d14623e7a 100644 --- a/flexmeasures/data/models/charts/belief_charts.py +++ b/flexmeasures/data/models/charts/belief_charts.py @@ -1,21 +1,25 @@ from flexmeasures.data.models.charts.defaults import FIELD_DEFINITIONS +from flexmeasures.utils.flexmeasures_inflection import capitalize -def bar_chart(title: str, quantity: str = "unknown quantity", unit: str = "a.u."): - if not unit: - unit = "a.u." +def bar_chart( + sensor: "Sensor", # noqa F821 + **override_chart_specs: dict, +): + unit = sensor.unit if sensor.unit else "a.u." event_value_field_definition = dict( - title=f"{quantity} ({unit})", + title=f"{capitalize(sensor.sensor_type)} ({unit})", format=".3s", stack=None, **FIELD_DEFINITIONS["event_value"], ) - return { + chart_specs = { "description": "A simple bar chart.", - "title": title, + "title": capitalize(sensor.name), "mark": "bar", "encoding": { "x": FIELD_DEFINITIONS["event_start"], + "x2": FIELD_DEFINITIONS["event_end"], "y": event_value_field_definition, "color": FIELD_DEFINITIONS["source"], "opacity": {"value": 0.7}, @@ -25,4 +29,13 @@ def bar_chart(title: str, quantity: str = "unknown quantity", unit: str = "a.u." 
FIELD_DEFINITIONS["source"], ], }, + "transform": [ + { + "calculate": f"datum.event_start + {sensor.event_resolution.total_seconds() * 1000}", + "as": "event_end", + }, + ], } + for k, v in override_chart_specs.items(): + chart_specs[k] = v + return chart_specs diff --git a/flexmeasures/data/models/charts/defaults.py b/flexmeasures/data/models/charts/defaults.py index 10b8d8a84..17e1b60bc 100644 --- a/flexmeasures/data/models/charts/defaults.py +++ b/flexmeasures/data/models/charts/defaults.py @@ -16,6 +16,11 @@ type="temporal", title=None, ), + "event_end": dict( + field="event_end", + type="temporal", + title=None, + ), "event_value": dict( field="event_value", type="quantitative", @@ -48,14 +53,22 @@ def decorated_chart_specs(*args, **kwargs): chart_specs.pop("$schema") if dataset_name: chart_specs["data"] = {"name": dataset_name} - chart_specs["height"] = HEIGHT - chart_specs["width"] = WIDTH - chart_specs["transform"] = [ + + # Fall back to default height and width, if needed + if "height" not in chart_specs: + chart_specs["height"] = HEIGHT + if "width" not in chart_specs: + chart_specs["width"] = WIDTH + + # Add transform function to calculate full date + if "transform" not in chart_specs: + chart_specs["transform"] = [] + chart_specs["transform"].append( { "as": "full_date", "calculate": f"timeFormat(datum.event_start, '{TIME_FORMAT}')", } - ] + ) return chart_specs return decorated_chart_specs diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 304ae613a..474d5d4c0 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -28,7 +28,6 @@ from flexmeasures.data.models.generic_assets import GenericAsset from flexmeasures.data.models.validation_utils import check_required_attributes from flexmeasures.utils.time_utils import server_now -from flexmeasures.utils.flexmeasures_inflection import capitalize class Sensor(db.Model, tb.SensorDBMixin, AuthModelMixin): @@ -270,9 +269,7 @@ def chart( ) # todo remove this placeholder when sensor types are modelled chart_specs = chart_type_to_chart_specs( chart_type, - title=capitalize(self.name), - quantity=capitalize(self.sensor_type), - unit=self.unit, + sensor=self, dataset_name=dataset_name, **kwargs, ) diff --git a/flexmeasures/ui/templates/views/sensors.html b/flexmeasures/ui/templates/views/sensors.html index 78a971c7d..543f43d17 100644 --- a/flexmeasures/ui/templates/views/sensors.html +++ b/flexmeasures/ui/templates/views/sensors.html @@ -9,7 +9,12 @@
-

+
+
+
+
+
+
@@ -31,7 +36,7 @@ async function embedAndLoad(chartSpecsPath, elementId, datasetName) { - await vegaEmbed('#'+elementId, chartSpecsPath + '?dataset_name=' + datasetName, {{ chart_options | safe }}) + await vegaEmbed('#'+elementId, chartSpecsPath + '?dataset_name=' + datasetName + '&width=container', {{ chart_options | safe }}) .then(function (result) { // result.view is the Vega View, chartSpecsPath is the original Vega-Lite specification vegaView = result.view; From 14ffcdebd71de2378231a896e4b5da6d4fb09ff5 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Thu, 13 Jan 2022 09:13:48 +0100 Subject: [PATCH 39/46] Fix resolution of bar charts (#310) Better bar widths, based on sensor resolution. * Append to transforms possibly already coming out of the chart specs Signed-off-by: F.N. Claessen * Let chart specs derive title, quantity and unit from Sensor object Signed-off-by: F.N. Claessen * Derive bar width from sensor's event resolution Signed-off-by: F.N. Claessen * Fix attribute call Signed-off-by: F.N. Claessen * Changelog entry Signed-off-by: F.N. Claessen --- documentation/changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 76d234000..8f5567308 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -17,6 +17,7 @@ New features Bugfixes ----------- * Fix recording time of schedules triggered by UDI events [see `PR #300 `_] +* Set bar width of bar charts based on sensor resolution [see `PR #310 `_] Infrastructure / Support ---------------------- From 874b06008233a9a13176cab6a8a643f796ad8cf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Fri, 14 Jan 2022 15:47:49 +0100 Subject: [PATCH 40/46] add legacy comments (and reasoning) to a list of modules and functions (#309) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add legacy comments (and reasoning) to a list of modules and functions Signed-off-by: Nicolas Höning * add legacy notes to models and schemas Signed-off-by: Nicolas Höning * add some legacy comments to CLI add/delete Signed-off-by: Nicolas Höning --- flexmeasures/cli/data_add.py | 9 ++++++++- flexmeasures/cli/data_delete.py | 3 ++- flexmeasures/data/models/assets.py | 16 ++++++++++++---- flexmeasures/data/models/markets.py | 14 ++++++++++---- flexmeasures/data/models/weather.py | 15 ++++++++++----- flexmeasures/data/queries/analytics.py | 5 +++++ flexmeasures/data/queries/portfolio.py | 6 ++++++ flexmeasures/data/schemas/weather.py | 2 ++ flexmeasures/data/services/resources.py | 6 ++++++ flexmeasures/ui/utils/plotting_utils.py | 5 +++++ flexmeasures/ui/views/analytics.py | 6 ++++++ flexmeasures/ui/views/charts.py | 9 +++------ flexmeasures/ui/views/dashboard.py | 3 ++- flexmeasures/ui/views/logged_in_user.py | 4 ++++ flexmeasures/ui/views/portfolio.py | 8 ++++++++ 15 files changed, 89 insertions(+), 22 deletions(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 1859ddbe8..176973fc2 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -279,6 +279,7 @@ def add_generic_asset(**args): def new_asset(**args): """ Create a new asset. + This is legacy, with the new data model we only want to add GenericAssets. """ check_timezone(args["timezone"]) # if no market given, select dummy market @@ -328,7 +329,11 @@ def new_asset(**args): help="timezone as string, e.g. 
'UTC' (default) or 'Europe/Amsterdam'", ) def add_weather_sensor(**args): - """Add a weather sensor.""" + """ + Add a weather sensor. + This is legacy, after we moved to the new data model. + Adding necessary GenericAsset and Sensor(s) should be done by the (to be built) OWM plugin. + """ check_timezone(args["timezone"]) check_errors(WeatherSensorSchema().validate(args)) args["event_resolution"] = timedelta(minutes=args["event_resolution"]) @@ -668,6 +673,8 @@ def collect_weather_data(region, location, num_cells, method, store_in_db): This function can get weather data for one location or for several locations within a geometrical grid (See the --location parameter). + + This should move to a FlexMeasures plugin for OWM integration. """ from flexmeasures.data.scripts.grid_weather import get_weather_forecasts diff --git a/flexmeasures/cli/data_delete.py b/flexmeasures/cli/data_delete.py index 005c22a07..33c001097 100644 --- a/flexmeasures/cli/data_delete.py +++ b/flexmeasures/cli/data_delete.py @@ -142,7 +142,8 @@ def delete_structure(force): markets (types) and weather sensors (types) and users. TODO: This could in our future data model (currently in development) be replaced by - `flexmeasures delete generic-asset-type` and `flexmeasures delete sensor`. + `flexmeasures delete generic-asset-type`, `flexmeasures delete generic-asset` + and `flexmeasures delete sensor`. """ if not force: confirm_deletion(structure=True) diff --git a/flexmeasures/data/models/assets.py b/flexmeasures/data/models/assets.py index 49415fedc..2fd82025d 100644 --- a/flexmeasures/data/models/assets.py +++ b/flexmeasures/data/models/assets.py @@ -23,7 +23,11 @@ class AssetType(db.Model): - """Describing asset types for our purposes""" + """ + Describing asset types for our purposes + + This model is now considered legacy. See GenericAssetType. + """ name = db.Column(db.String(80), primary_key=True) # The name we want to see (don't unnecessarily capitalize, so it can be used in a sentence) @@ -88,7 +92,11 @@ def __repr__(self): class Asset(db.Model, tb.SensorDBMixin): - """Each asset is an energy- consuming or producing hardware.""" + """ + Each asset is an energy- consuming or producing hardware. + + This model is now considered legacy. See GenericAsset and Sensor. + """ id = db.Column( db.Integer, db.ForeignKey("sensor.id"), primary_key=True, autoincrement=True @@ -315,8 +323,8 @@ class Power(TimedValue, db.Model): """ All measurements of power data are stored in one slim table. Negative values indicate consumption. - TODO: datetime objects take up most of the space (12 bytes each)). One way out is to normalise them out to a table. - TODO: If there are more than one measurement per asset per time step possible, we can expand rather easily. + + This model is now considered legacy. See TimedBelief. """ sensor_id = db.Column( diff --git a/flexmeasures/data/models/markets.py b/flexmeasures/data/models/markets.py index c12cae9c6..5961598ea 100644 --- a/flexmeasures/data/models/markets.py +++ b/flexmeasures/data/models/markets.py @@ -21,8 +21,9 @@ class MarketType(db.Model): - """Describing market types for our purposes. - TODO: Add useful attributes like frequency (e.g. 1H) and the meaning of units (e.g. Mwh). + """ + Describing market types for our purposes. + This model is now considered legacy. See GenericAssetType. 
""" name = db.Column(db.String(80), primary_key=True) @@ -59,7 +60,11 @@ def __repr__(self): class Market(db.Model, tb.SensorDBMixin): - """Each market is a pricing service.""" + """ + Each market is a pricing service. + + This model is now considered legacy. See GenericAsset and Sensor. + """ id = db.Column( db.Integer, db.ForeignKey("sensor.id"), primary_key=True, autoincrement=True @@ -190,7 +195,8 @@ def to_dict(self) -> Dict[str, str]: class Price(TimedValue, db.Model): """ All prices are stored in one slim table. - TODO: datetime objects take up most of the space (12 bytes each)). One way out is to normalise them out to a table. + + This model is now considered legacy. See TimedBelief. """ sensor_id = db.Column( diff --git a/flexmeasures/data/models/weather.py b/flexmeasures/data/models/weather.py index ee3ea53ee..23c4f441c 100644 --- a/flexmeasures/data/models/weather.py +++ b/flexmeasures/data/models/weather.py @@ -23,8 +23,8 @@ class WeatherSensorType(db.Model): - """ " - TODO: Add useful attributes like ...? + """ + This model is now considered legacy. See GenericAssetType. """ name = db.Column(db.String(80), primary_key=True) @@ -49,8 +49,12 @@ def __repr__(self): class WeatherSensor(db.Model, tb.SensorDBMixin): - """A weather sensor has a location on Earth and measures weather values of a certain weather sensor type, such as - temperature, wind speed and radiation.""" + """ + A weather sensor has a location on Earth and measures weather values of a certain weather sensor type, such as + temperature, wind speed and radiation. + + This model is now considered legacy. See GenericAsset and Sensor. + """ id = db.Column( db.Integer, db.ForeignKey("sensor.id"), primary_key=True, autoincrement=True @@ -245,7 +249,8 @@ def to_dict(self) -> Dict[str, str]: class Weather(TimedValue, db.Model): """ All weather measurements are stored in one slim table. - TODO: datetime objects take up most of the space (12 bytes each)). One way out is to normalise them out to a table. + + This model is now considered legacy. See TimedBelief. """ sensor_id = db.Column( diff --git a/flexmeasures/data/queries/analytics.py b/flexmeasures/data/queries/analytics.py index 922290097..ba5782870 100644 --- a/flexmeasures/data/queries/analytics.py +++ b/flexmeasures/data/queries/analytics.py @@ -16,6 +16,11 @@ from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.models.weather import WeatherSensorType +""" +These queries are considered legacy by now. +They are used in legacy views and use the old data model. +""" + def get_power_data( resource: Union[str, Resource], # name or instance diff --git a/flexmeasures/data/queries/portfolio.py b/flexmeasures/data/queries/portfolio.py index 50bf8c517..8b5858340 100644 --- a/flexmeasures/data/queries/portfolio.py +++ b/flexmeasures/data/queries/portfolio.py @@ -9,6 +9,12 @@ from flexmeasures.data.services.resources import Resource +""" +This is considered legacy code now. +The view is considered legacy, and it relies on the old data model. +""" + + def get_structure( assets: List[Asset], ) -> Tuple[Dict[str, AssetType], List[Market], Dict[str, Resource]]: diff --git a/flexmeasures/data/schemas/weather.py b/flexmeasures/data/schemas/weather.py index 5e81f63fc..5b7b30f9b 100644 --- a/flexmeasures/data/schemas/weather.py +++ b/flexmeasures/data/schemas/weather.py @@ -8,6 +8,8 @@ class WeatherSensorSchema(SensorSchemaMixin, ma.SQLAlchemySchema): """ WeatherSensor schema, with validations. 
+ + This is considered legacy now, as the WeatherSensor creation CLI task is also going to be deprecated. """ class Meta: diff --git a/flexmeasures/data/services/resources.py b/flexmeasures/data/services/resources.py index 3af10f4fd..13f48fac5 100644 --- a/flexmeasures/data/services/resources.py +++ b/flexmeasures/data/services/resources.py @@ -39,6 +39,12 @@ from flexmeasures.utils.geo_utils import parse_lat_lng from flexmeasures.utils import coding_utils, time_utils +""" +This module is legacy, as we move to the new data model (see projects on Github). +Do check, but apart from get_sensors (which needs a rewrite), functionality has +either been copied into services/asset_grouping or is no longer needed. +Two views using this (analytics and portfolio) are also considered legacy. +""" p = inflect.engine() cached_property = coding_utils.make_registering_decorator(cached_property) diff --git a/flexmeasures/ui/utils/plotting_utils.py b/flexmeasures/ui/utils/plotting_utils.py index c01a73bfc..d75c3dcad 100644 --- a/flexmeasures/ui/utils/plotting_utils.py +++ b/flexmeasures/ui/utils/plotting_utils.py @@ -34,6 +34,11 @@ ) from flexmeasures.ui.utils.view_utils import set_time_range_for_session +""" +This module is by now considered legacy, as we're moving away from Bokeh and towards Altair. +See flexmeasures/data/models/charts. +""" + def create_hover_tool( # noqa: C901 y_unit: str, resolution: timedelta, as_beliefs: bool = False diff --git a/flexmeasures/ui/views/analytics.py b/flexmeasures/ui/views/analytics.py index 881555021..ac74e355b 100644 --- a/flexmeasures/ui/views/analytics.py +++ b/flexmeasures/ui/views/analytics.py @@ -42,6 +42,12 @@ from flexmeasures.ui.utils.plotting_utils import create_graph, separate_legend from flexmeasures.ui.views import flexmeasures_ui +""" +These views are considered legacy by now. They are too specific to a use case and also +rely on Bokeh. We might move them to a plugin (re-implemented with Altair). +When removing this, also remove the templates. +""" + @flexmeasures_ui.route("/analytics", methods=["GET", "POST"]) @account_roles_accepted("Prosumer") diff --git a/flexmeasures/ui/views/charts.py b/flexmeasures/ui/views/charts.py index 79e703615..02b669a30 100644 --- a/flexmeasures/ui/views/charts.py +++ b/flexmeasures/ui/views/charts.py @@ -14,12 +14,9 @@ """ An endpoint to get a power chart. -This will grow to become code for more charts eventually. -The plan is to separate charts specs from the actual data later, -and to switch to Altair. - -For now, we'll keep this endpoint here, with route and implementation in the same file. -When we move forward, we'll review the architecture. +This chart view is considered legacy now. See flexmeasures/data/models/charts for our new approach +to supporting charts via the API, using Altair and the Sensor ID. +It also uses the old data model. """ diff --git a/flexmeasures/ui/views/dashboard.py b/flexmeasures/ui/views/dashboard.py index 51a881b81..b70414cfd 100644 --- a/flexmeasures/ui/views/dashboard.py +++ b/flexmeasures/ui/views/dashboard.py @@ -13,7 +13,8 @@ ) """ -Note: This view is deprecated. +Note: This view is deprecated / legacy. +When removing this view, also remove its HTML template. 
""" diff --git a/flexmeasures/ui/views/logged_in_user.py b/flexmeasures/ui/views/logged_in_user.py index 012cbf766..01e3be569 100644 --- a/flexmeasures/ui/views/logged_in_user.py +++ b/flexmeasures/ui/views/logged_in_user.py @@ -9,6 +9,10 @@ @flexmeasures_ui.route("/logged-in-user", methods=["GET"]) @login_required def logged_in_user_view(): + """TODO: + - Show account name & roles + - Count their assets with a query, link to their (new) list + """ return render_flexmeasures_template( "admin/logged_in_user.html", logged_in_user=current_user, diff --git a/flexmeasures/ui/views/portfolio.py b/flexmeasures/ui/views/portfolio.py index 5137c48f5..673d1b41b 100644 --- a/flexmeasures/ui/views/portfolio.py +++ b/flexmeasures/ui/views/portfolio.py @@ -28,6 +28,14 @@ set_time_range_for_session, ) +""" +This view is considered legacy by now. It is too specific to a use case and also +relies on Bokeh. It also contains mock code. +We might re-implement with Altair, as an account portfolio overview is handy. +Maybe for all sensors sharing some trait, for instance all power sensors. +When removing this, also remove the template. +""" + @flexmeasures_ui.route("/portfolio", methods=["GET", "POST"]) @account_roles_accepted("Prosumer") From 6cad7e8def0d37fca403506f6b07812e58b95a40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Fri, 14 Jan 2022 17:42:08 +0100 Subject: [PATCH 41/46] Access to public assets (#316) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * better name for the EVERYONE constant - we only mean all logged-in users. Signed-off-by: Nicolas Höning * allow every logged-in user to read public assets and sensors Signed-off-by: Nicolas Höning * better docstring in __acl__ Signed-off-by: Nicolas Höning * fix __acl__ docstrings Signed-off-by: Nicolas Höning * even better __acl__ docstring Signed-off-by: Nicolas Höning --- flexmeasures/auth/policy.py | 4 ++-- flexmeasures/data/models/generic_assets.py | 12 ++++++------ flexmeasures/data/models/time_series.py | 9 ++++++--- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/flexmeasures/auth/policy.py b/flexmeasures/auth/policy.py index 756e8afa0..37e030fbb 100644 --- a/flexmeasures/auth/policy.py +++ b/flexmeasures/auth/policy.py @@ -9,7 +9,7 @@ ADMIN_READER_ROLE = "admin-reader" # constants to allow access to certain groups -EVERYONE = "everyone" +EVERY_LOGGED_IN_USER = "every-logged-in-user" PRINCIPALS_TYPE = Union[str, Tuple[str], List[Union[str, Tuple[str]]]] @@ -89,7 +89,7 @@ def user_matches_principals(user, principals: PRINCIPALS_TYPE) -> bool: matchable_principals = ( matchable_principals, ) # now we handle only Tuple[str] - if EVERYONE in matchable_principals: + if EVERY_LOGGED_IN_USER in matchable_principals: return True if user is not None and all( [ diff --git a/flexmeasures/data/models/generic_assets.py b/flexmeasures/data/models/generic_assets.py index e33526e14..60af5e119 100644 --- a/flexmeasures/data/models/generic_assets.py +++ b/flexmeasures/data/models/generic_assets.py @@ -11,7 +11,7 @@ from flexmeasures.data import db from flexmeasures.data.models.user import User -from flexmeasures.auth.policy import AuthModelMixin +from flexmeasures.auth.policy import AuthModelMixin, EVERY_LOGGED_IN_USER from flexmeasures.utils import geo_utils @@ -58,15 +58,15 @@ class GenericAsset(db.Model, AuthModelMixin): def __acl__(self): """ + All logged-in users can read if the asset is public. Within same account, everyone can read and update. 
- Creation and deletion are left to account admins (a role we don't use yet). - Note: creation is not relevant on a GenericAsset object (as it already exists), - but we might want to use this permission to check if data *within* the asset, - like sensors, can be created. See the discussion in auth/policy. + Creation and deletion are left to account admins. """ return { "create-children": (f"account:{self.account_id}", "role:account-admin"), - "read": f"account:{self.account_id}", + "read": f"account:{self.account_id}" + if self.account_id is not None + else EVERY_LOGGED_IN_USER, "update": f"account:{self.account_id}", "delete": (f"account:{self.account_id}", "role:account-admin"), } diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index 474d5d4c0..a413418f7 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -10,7 +10,7 @@ from timely_beliefs.beliefs.probabilistic_utils import get_median_belief import timely_beliefs.utils as tb_utils -from flexmeasures.auth.policy import AuthModelMixin +from flexmeasures.auth.policy import AuthModelMixin, EVERY_LOGGED_IN_USER from flexmeasures.data.config import db from flexmeasures.data.queries.utils import ( create_beliefs_query, @@ -71,15 +71,18 @@ def __init__( def __acl__(self): """ + All logged-in users can read if the sensor belongs to a public asset. Within same account, everyone can read and update. - Deletion needs the account-admin role. + Creation and deletion are left to account admins. """ return { "create-children": ( f"account:{self.generic_asset.account_id}", "role:account-admin", ), - "read": f"account:{self.generic_asset.account_id}", + "read": f"account:{self.generic_asset.account_id}" + if self.generic_asset.account_id is not None + else EVERY_LOGGED_IN_USER, "update": f"account:{self.generic_asset.account_id}", "delete": ( f"account:{self.generic_asset.account_id}", From f3107932d78f7bafaa1f07e90bdfc78bd4fa897d Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Mon, 17 Jan 2022 09:47:02 +0100 Subject: [PATCH 42/46] Issue 311 sensible defaults for searching for beliefs (#312) The default for search methods on beliefs data is now to search most recent beliefs only. * Sensor.search_beliefs loads most recent beliefs by default Signed-off-by: F.N. Claessen * TimedBelief.search loads most recent beliefs by default Signed-off-by: F.N. Claessen * Add docstring comments Signed-off-by: F.N. Claessen * Changelog entry Signed-off-by: F.N. Claessen * Add todo for open timely-beliefs issue Signed-off-by: F.N. Claessen --- documentation/changelog.rst | 1 + flexmeasures/data/models/time_series.py | 8 ++++++-- flexmeasures/data/services/time_series.py | 2 ++ flexmeasures/data/tests/test_queries.py | 16 ++++++++++------ .../data/tests/test_time_series_services.py | 14 +++++++------- 5 files changed, 26 insertions(+), 15 deletions(-) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 8f5567308..79b72c34e 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -29,6 +29,7 @@ Infrastructure / Support * Migrate attributes of assets, markets and weather sensors to our new sensor model [see `PR #254 `_ and `project 9 `_] * Migrate all time series data to our new sensor data model based on the `timely beliefs `_ lib [see `PR #286 `_ and `project 9 `_] * Support the new asset model (which describes the organisational structure, rather than sensors and data) in UI and API. 
Until the transition to our new data model is completed, the new API for assets is at `/api/dev/generic_assets`. [see `PR #251 `_ and `PR #290 `_] +* Internal search methods return most recent beliefs by default, also for charts, which can make them load a lot faster [see `PR #307 `_ and `PR #312 `_] v0.7.1 | November 08, 2021 diff --git a/flexmeasures/data/models/time_series.py b/flexmeasures/data/models/time_series.py index a413418f7..32267c04b 100644 --- a/flexmeasures/data/models/time_series.py +++ b/flexmeasures/data/models/time_series.py @@ -188,7 +188,7 @@ def search_beliefs( source: Optional[ Union[DataSource, List[DataSource], int, List[int], str, List[str]] ] = None, - most_recent_beliefs_only: bool = False, + most_recent_beliefs_only: bool = True, most_recent_events_only: bool = False, most_recent_only: bool = None, # deprecated one_deterministic_belief_per_event: bool = False, @@ -196,6 +196,8 @@ def search_beliefs( ) -> Union[tb.BeliefsDataFrame, str]: """Search all beliefs about events for this sensor. + If you don't set any filters, you get the most recent beliefs about all events. + :param event_starts_after: only return beliefs about events that start after this datetime (inclusive) :param event_ends_before: only return beliefs about events that end before this datetime (inclusive) :param beliefs_after: only return beliefs formed after this datetime (inclusive) @@ -378,7 +380,7 @@ def search( user_source_ids: Optional[Union[int, List[int]]] = None, source_types: Optional[List[str]] = None, exclude_source_types: Optional[List[str]] = None, - most_recent_beliefs_only: bool = False, + most_recent_beliefs_only: bool = True, most_recent_events_only: bool = False, most_recent_only: bool = None, # deprecated one_deterministic_belief_per_event: bool = False, @@ -387,6 +389,8 @@ def search( ) -> Union[tb.BeliefsDataFrame, Dict[str, tb.BeliefsDataFrame]]: """Search all beliefs about events for the given sensors. + If you don't set any filters, you get the most recent beliefs about all events. 
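+
+        An editorial usage sketch (not part of the original patch; the sensor name "epex_da" is taken from the tests below):
+
+            >>> TimedBelief.search("epex_da")  # new default: only the most recent beliefs per event
+            >>> TimedBelief.search("epex_da", most_recent_beliefs_only=False)  # previous behaviour: the full belief history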
+ :param sensors: search only these sensors, identified by their instance or id (both unique) or name (non-unique) :param event_starts_after: only return beliefs about events that start after this datetime (inclusive) :param event_ends_before: only return beliefs about events that end before this datetime (inclusive) diff --git a/flexmeasures/data/services/time_series.py b/flexmeasures/data/services/time_series.py index e2832ea8e..515fb8424 100644 --- a/flexmeasures/data/services/time_series.py +++ b/flexmeasures/data/services/time_series.py @@ -329,7 +329,9 @@ def drop_unchanged_beliefs(bdf: tb.BeliefsDataFrame) -> tb.BeliefsDataFrame: event_ends_before=bdf.event_ends[-1], beliefs_before=bdf.lineage.belief_times[0], # unique belief time source=bdf.lineage.sources[0], # unique source + most_recent_beliefs_only=False, ) + # todo: delete next line and set most_recent_beliefs_only=True when this is resolved: https://github.com/SeitaBV/timely-beliefs/issues/97 previous_most_recent_beliefs_in_db = belief_utils.select_most_recent_belief( previous_beliefs_in_db ) diff --git a/flexmeasures/data/tests/test_queries.py b/flexmeasures/data/tests/test_queries.py index 1f4232753..048ecccbe 100644 --- a/flexmeasures/data/tests/test_queries.py +++ b/flexmeasures/data/tests/test_queries.py @@ -233,10 +233,10 @@ def test_query_beliefs(setup_beliefs): sensor = Sensor.query.filter_by(name="epex_da").one_or_none() source = DataSource.query.filter_by(name="ENTSO-E").one_or_none() bdfs = [ - TimedBelief.search(sensor, source=source), - TimedBelief.search(sensor.id, source=source), - TimedBelief.search(sensor.name, source=source), - sensor.search_beliefs(source=source), + TimedBelief.search(sensor, source=source, most_recent_beliefs_only=False), + TimedBelief.search(sensor.id, source=source, most_recent_beliefs_only=False), + TimedBelief.search(sensor.name, source=source, most_recent_beliefs_only=False), + sensor.search_beliefs(source=source, most_recent_beliefs_only=False), tb.BeliefsDataFrame(sensor.beliefs)[ tb.BeliefsDataFrame(sensor.beliefs).index.get_level_values("source") == source @@ -255,7 +255,9 @@ def test_persist_beliefs(setup_beliefs, setup_test_data): """ sensor = Sensor.query.filter_by(name="epex_da").one_or_none() source = DataSource.query.filter_by(name="ENTSO-E").one_or_none() - bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor, source=source) + bdf: tb.BeliefsDataFrame = TimedBelief.search( + sensor, source=source, most_recent_beliefs_only=False + ) # Form new beliefs df = bdf.reset_index() @@ -266,5 +268,7 @@ def test_persist_beliefs(setup_beliefs, setup_test_data): ) TimedBelief.add(bdf) - bdf: tb.BeliefsDataFrame = TimedBelief.search(sensor, source=source) + bdf: tb.BeliefsDataFrame = TimedBelief.search( + sensor, source=source, most_recent_beliefs_only=False + ) assert len(bdf) == setup_beliefs * 2 diff --git a/flexmeasures/data/tests/test_time_series_services.py b/flexmeasures/data/tests/test_time_series_services.py index 9d9589aec..d14c0cbe9 100644 --- a/flexmeasures/data/tests/test_time_series_services.py +++ b/flexmeasures/data/tests/test_time_series_services.py @@ -14,7 +14,7 @@ def test_drop_unchanged_beliefs(setup_beliefs): # Set a reference for the number of beliefs stored and their belief times sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - bdf = sensor.search_beliefs() + bdf = sensor.search_beliefs(most_recent_beliefs_only=False) num_beliefs_before = len(bdf) belief_times_before = bdf.belief_times @@ -22,7 +22,7 @@ def 
test_drop_unchanged_beliefs(setup_beliefs): save_to_db(bdf) # Verify that no new beliefs were saved - bdf = sensor.search_beliefs() + bdf = sensor.search_beliefs(most_recent_beliefs_only=False) assert len(bdf) == num_beliefs_before # See what happens when storing all beliefs with their belief time updated @@ -32,7 +32,7 @@ def test_drop_unchanged_beliefs(setup_beliefs): save_to_db(bdf) # Verify that no new beliefs were saved - bdf = sensor.search_beliefs() + bdf = sensor.search_beliefs(most_recent_beliefs_only=False) assert len(bdf) == num_beliefs_before assert list(bdf.belief_times) == list(belief_times_before) @@ -42,7 +42,7 @@ def test_do_not_drop_beliefs_copied_by_another_source(setup_beliefs): # Set a reference for the number of beliefs stored sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - bdf = sensor.search_beliefs() + bdf = sensor.search_beliefs(most_recent_beliefs_only=False) num_beliefs_before = len(bdf) # See what happens when storing all belief with their source updated @@ -53,7 +53,7 @@ def test_do_not_drop_beliefs_copied_by_another_source(setup_beliefs): save_to_db(bdf) # Verify that all the new beliefs were added - bdf = sensor.search_beliefs() + bdf = sensor.search_beliefs(most_recent_beliefs_only=False) num_beliefs_after = len(bdf) assert num_beliefs_after == 2 * num_beliefs_before @@ -68,7 +68,7 @@ def test_do_not_drop_changed_probabilistic_belief(setup_beliefs): # Set a reference for the number of beliefs stored sensor = Sensor.query.filter_by(name="epex_da").one_or_none() - bdf = sensor.search_beliefs(source="ENTSO-E") + bdf = sensor.search_beliefs(source="ENTSO-E", most_recent_beliefs_only=False) num_beliefs_before = len(bdf) # See what happens when storing a belief with more certainty one hour later @@ -91,6 +91,6 @@ def test_do_not_drop_changed_probabilistic_belief(setup_beliefs): save_to_db(new_belief) # Verify that the whole probabilistic belief was added - bdf = sensor.search_beliefs(source="ENTSO-E") + bdf = sensor.search_beliefs(source="ENTSO-E", most_recent_beliefs_only=False) num_beliefs_after = len(bdf) assert num_beliefs_after == num_beliefs_before + len(new_belief) From 2054f10b977ee8798c428b9b3ee91bc37f611b02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Tue, 18 Jan 2022 12:06:42 +0100 Subject: [PATCH 43/46] move /sensorData into /api/dev prefix (#317) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Nicolas Höning --- flexmeasures/api/dev/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/api/dev/__init__.py b/flexmeasures/api/dev/__init__.py index 144d78d6b..d4f90bf9e 100644 --- a/flexmeasures/api/dev/__init__.py +++ b/flexmeasures/api/dev/__init__.py @@ -16,7 +16,7 @@ def register_at(app: Flask): SensorAPI.register(app, route_prefix=dev_api_prefix) AssetAPI.register(app, route_prefix=dev_api_prefix) - @app.route("/sensorData", methods=["POST"]) + @app.route(f"{dev_api_prefix}/sensorData", methods=["POST"]) @auth_token_required @account_roles_accepted("MDC", "Prosumer") def post_sensor_data(): From a30b7aa92a18b0cadc64df7933c962f9c06efb88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20H=C3=B6ning?= Date: Tue, 18 Jan 2022 12:56:11 +0100 Subject: [PATCH 44/46] Issue 257 attribution requirements (#292) * update attribution for Map (adding Improve this map) and add MapBox logo * add changelog entry * use https Signed-off-by: F.N. Claessen Co-authored-by: F.N. 
Claessen --- documentation/changelog.rst | 1 + flexmeasures/ui/static/css/flexmeasures.css | 18 ++++++++++++++++++ flexmeasures/ui/static/js/map-init.js | 14 ++++++++++---- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 79b72c34e..780769eed 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -24,6 +24,7 @@ Infrastructure / Support * Account-based authorization, incl. new decorators for endpoints [see `PR #210 `_] * Central authorization policy which lets database models codify who can do what (permission-based) and relieve API endpoints from this [see `PR #234 `_] * Improve data specification for forecasting models using timely-beliefs data [see `PR #154 `_] +* Properly attribute Mapbox and OpenStreetMap [see `PR #292 `_] * Allow plugins to register their custom config settings, so that FlexMeasures can check whether they are set up correctly [see `PR #230 `_ and `PR #237 `_] * Add sensor method to obtain just its latest state (excl. forecasts) [see `PR #235 `_] * Migrate attributes of assets, markets and weather sensors to our new sensor model [see `PR #254 `_ and `project 9 `_] diff --git a/flexmeasures/ui/static/css/flexmeasures.css b/flexmeasures/ui/static/css/flexmeasures.css index faab9cf98..bb97d2e5b 100644 --- a/flexmeasures/ui/static/css/flexmeasures.css +++ b/flexmeasures/ui/static/css/flexmeasures.css @@ -351,6 +351,24 @@ i.center-icon:after, i.center-icon:before { opacity: 0.7; } +.mapbox-logo { + position: absolute; + display: block; + height: 20px; + width: 65px; + left: 10px; + bottom: 10px; + text-indent: -9999px; + z-index: 99999; + overflow: hidden; + + /* `background-image` contains the Mapbox logo */ + background-image: url(data:image/svg+xml;base64,<?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 80.47 20.02" style="enable-background:new 0 0 80.47 20.02;" xml:space="preserve"><style type="text/css">.st0{opacity:0.6;fill:#FFFFFF;enable-background:new    ;}.st1{opacity:0.6;enable-background:new    ;}</style><g><path class="st0" d="M79.29,13.61c0,0.11-0.09,0.2-0.2,0.2h-1.53c-0.12,0-0.23-0.06-0.29-0.16l-1.37-2.28l-1.37,2.28c-0.06,0.1-0.17,0.16-0.29,0.16h-1.53c-0.04,0-0.08-0.01-0.11-0.03c-0.09-0.06-0.12-0.18-0.06-0.27c0,0,0,0,0,0l2.31-3.5l-2.28-3.47c-0.02-0.03-0.03-0.07-0.03-0.11c0-0.11,0.09-0.2,0.2-0.2h1.53c0.12,0,0.23,0.06,0.29,0.16l1.34,2.25l1.33-2.24c0.06-0.1,0.17-0.16,0.29-0.16h1.53c0.04,0,0.08,0.01,0.11,0.03c0.09,0.06,0.12,0.18,0.06,0.27c0,0,0,0,0,0L76.96,10l2.31,3.5C79.28,13.53,79.29,13.57,79.29,13.61z"/><path class="st0" d="M63.09,9.16c-0.37-1.79-1.87-3.12-3.66-3.12c-0.98,0-1.93,0.4-2.6,1.12V3.37c0-0.12-0.1-0.22-0.22-0.22h-1.33c-0.12,0-0.22,0.1-0.22,0.22v10.21c0,0.12,0.1,0.22,0.22,0.22h1.33c0.12,0,0.22-0.1,0.22-0.22v-0.7c0.68,0.71,1.62,1.12,2.6,1.12c1.79,0,3.29-1.34,3.66-3.13C63.21,10.3,63.21,9.72,63.09,9.16L63.09,9.16z M59.12,12.41c-1.26,0-2.28-1.06-2.3-2.36V9.99c0.02-1.31,1.04-2.36,2.3-2.36s2.3,1.07,2.3,2.39S60.39,12.41,59.12,12.41z"/><path class="st0" d="M68.26,6.04c-1.89-0.01-3.54,1.29-3.96,3.13c-0.12,0.56-0.12,1.13,0,1.69c0.42,1.85,2.07,3.16,3.97,3.14c2.24,0,4.06-1.78,4.06-3.99S70.51,6.04,68.26,6.04z M68.24,12.42c-1.27,0-2.3-1.07-2.3-2.39s1.03-2.4,2.3-2.4s2.3,1.07,2.3,2.39S69.51,12.41,68.24,12.42L68.24,12.42z"/><path class="st1" 
d="M59.12,7.63c-1.26,0-2.28,1.06-2.3,2.36v0.06c0.02,1.31,1.04,2.36,2.3,2.36s2.3-1.07,2.3-2.39S60.39,7.63,59.12,7.63z M59.12,11.23c-0.6,0-1.09-0.53-1.11-1.19V10c0.01-0.66,0.51-1.19,1.11-1.19s1.11,0.54,1.11,1.21S59.74,11.23,59.12,11.23z"/><path class="st1" d="M68.24,7.63c-1.27,0-2.3,1.07-2.3,2.39s1.03,2.39,2.3,2.39s2.3-1.07,2.3-2.39S69.51,7.63,68.24,7.63z M68.24,11.23c-0.61,0-1.11-0.54-1.11-1.21s0.5-1.2,1.11-1.2s1.11,0.54,1.11,1.21S68.85,11.23,68.24,11.23z"/><path class="st0" d="M43.56,6.24h-1.33c-0.12,0-0.22,0.1-0.22,0.22v0.7c-0.68-0.71-1.62-1.12-2.6-1.12c-2.07,0-3.75,1.78-3.75,3.99s1.69,3.99,3.75,3.99c0.99,0,1.93-0.41,2.6-1.13v0.7c0,0.12,0.1,0.22,0.22,0.22h1.33c0.12,0,0.22-0.1,0.22-0.22V6.44c0-0.11-0.09-0.21-0.21-0.21C43.57,6.24,43.57,6.24,43.56,6.24z M42.02,10.05c-0.01,1.31-1.04,2.36-2.3,2.36s-2.3-1.07-2.3-2.39s1.03-2.4,2.29-2.4c1.27,0,2.28,1.06,2.3,2.36L42.02,10.05z"/><path class="st1" d="M39.72,7.63c-1.27,0-2.3,1.07-2.3,2.39s1.03,2.39,2.3,2.39s2.28-1.06,2.3-2.36V9.99C42,8.68,40.98,7.63,39.72,7.63z M38.62,10.02c0-0.67,0.5-1.21,1.11-1.21c0.61,0,1.09,0.53,1.11,1.19v0.04c-0.01,0.65-0.5,1.18-1.11,1.18S38.62,10.68,38.62,10.02z"/><path class="st0" d="M49.91,6.04c-0.98,0-1.93,0.4-2.6,1.12V6.45c0-0.12-0.1-0.22-0.22-0.22h-1.33c-0.12,0-0.22,0.1-0.22,0.22v10.21c0,0.12,0.1,0.22,0.22,0.22h1.33c0.12,0,0.22-0.1,0.22-0.22v-3.78c0.68,0.71,1.62,1.12,2.61,1.12c2.07,0,3.75-1.78,3.75-3.99S51.98,6.04,49.91,6.04z M49.6,12.42c-1.26,0-2.28-1.06-2.3-2.36V9.99c0.02-1.31,1.04-2.37,2.29-2.37c1.26,0,2.3,1.07,2.3,2.39S50.86,12.41,49.6,12.42L49.6,12.42z"/><path class="st1" d="M49.6,7.63c-1.26,0-2.28,1.06-2.3,2.36v0.06c0.02,1.31,1.04,2.36,2.3,2.36s2.3-1.07,2.3-2.39S50.86,7.63,49.6,7.63z M49.6,11.23c-0.6,0-1.09-0.53-1.11-1.19V10C48.5,9.34,49,8.81,49.6,8.81c0.6,0,1.11,0.55,1.11,1.21S50.21,11.23,49.6,11.23z"/><path class="st0" d="M34.36,13.59c0,0.12-0.1,0.22-0.22,0.22h-1.34c-0.12,0-0.22-0.1-0.22-0.22V9.24c0-0.93-0.7-1.63-1.54-1.63c-0.76,0-1.39,0.67-1.51,1.54l0.01,4.44c0,0.12-0.1,0.22-0.22,0.22h-1.34c-0.12,0-0.22-0.1-0.22-0.22V9.24c0-0.93-0.7-1.63-1.54-1.63c-0.81,0-1.47,0.75-1.52,1.71v4.27c0,0.12-0.1,0.22-0.22,0.22h-1.33c-0.12,0-0.22-0.1-0.22-0.22V6.44c0.01-0.12,0.1-0.21,0.22-0.21h1.33c0.12,0,0.21,0.1,0.22,0.21v0.63c0.48-0.65,1.24-1.04,2.06-1.05h0.03c1.04,0,1.99,0.57,2.48,1.48c0.43-0.9,1.33-1.48,2.32-1.49c1.54,0,2.79,1.19,2.76,2.65L34.36,13.59z"/><path class="st1" 
d="M80.32,12.97l-0.07-0.12L78.38,10l1.85-2.81c0.42-0.64,0.25-1.49-0.39-1.92c-0.01-0.01-0.02-0.01-0.03-0.02c-0.22-0.14-0.48-0.21-0.74-0.21h-1.53c-0.53,0-1.03,0.28-1.3,0.74l-0.32,0.53l-0.32-0.53c-0.28-0.46-0.77-0.74-1.31-0.74h-1.53c-0.57,0-1.08,0.35-1.29,0.88c-2.09-1.58-5.03-1.4-6.91,0.43c-0.33,0.32-0.62,0.69-0.85,1.09c-0.85-1.55-2.45-2.6-4.28-2.6c-0.48,0-0.96,0.07-1.41,0.22V3.37c0-0.78-0.63-1.41-1.4-1.41h-1.33c-0.77,0-1.4,0.63-1.4,1.4v3.57c-0.9-1.3-2.38-2.08-3.97-2.09c-0.7,0-1.39,0.15-2.02,0.45c-0.23-0.16-0.51-0.25-0.8-0.25h-1.33c-0.43,0-0.83,0.2-1.1,0.53c-0.02-0.03-0.04-0.05-0.07-0.08c-0.27-0.29-0.65-0.45-1.04-0.45h-1.32c-0.29,0-0.57,0.09-0.8,0.25C40.8,5,40.12,4.85,39.42,4.85c-1.74,0-3.27,0.95-4.16,2.38c-0.19-0.44-0.46-0.85-0.79-1.19c-0.76-0.77-1.8-1.19-2.88-1.19h-0.01c-0.85,0.01-1.67,0.31-2.34,0.84c-0.7-0.54-1.56-0.84-2.45-0.84h-0.03c-0.28,0-0.55,0.03-0.82,0.1c-0.27,0.06-0.53,0.15-0.78,0.27c-0.2-0.11-0.43-0.17-0.67-0.17h-1.33c-0.78,0-1.4,0.63-1.4,1.4v7.14c0,0.78,0.63,1.4,1.4,1.4h1.33c0.78,0,1.41-0.63,1.41-1.41c0,0,0,0,0,0V9.35c0.03-0.34,0.22-0.56,0.34-0.56c0.17,0,0.36,0.17,0.36,0.45v4.35c0,0.78,0.63,1.4,1.4,1.4h1.34c0.78,0,1.4-0.63,1.4-1.4l-0.01-4.35c0.06-0.3,0.24-0.45,0.33-0.45c0.17,0,0.36,0.17,0.36,0.45v4.35c0,0.78,0.63,1.4,1.4,1.4h1.34c0.78,0,1.4-0.63,1.4-1.4v-0.36c0.91,1.23,2.34,1.96,3.87,1.96c0.7,0,1.39-0.15,2.02-0.45c0.23,0.16,0.51,0.25,0.8,0.25h1.32c0.29,0,0.57-0.09,0.8-0.25v1.91c0,0.78,0.63,1.4,1.4,1.4h1.33c0.78,0,1.4-0.63,1.4-1.4v-1.69c0.46,0.14,0.94,0.22,1.42,0.21c1.62,0,3.07-0.83,3.97-2.1v0.5c0,0.78,0.63,1.4,1.4,1.4h1.33c0.29,0,0.57-0.09,0.8-0.25c0.63,0.3,1.32,0.45,2.02,0.45c1.83,0,3.43-1.05,4.28-2.6c1.47,2.52,4.71,3.36,7.22,1.89c0.17-0.1,0.34-0.21,0.5-0.34c0.21,0.52,0.72,0.87,1.29,0.86h1.53c0.53,0,1.03-0.28,1.3-0.74l0.35-0.58l0.35,0.58c0.28,0.46,0.77,0.74,1.31,0.74h1.52c0.77,0,1.39-0.63,1.38-1.39C80.47,13.38,80.42,13.17,80.32,12.97L80.32,12.97z M34.15,13.81h-1.34c-0.12,0-0.22-0.1-0.22-0.22V9.24c0-0.93-0.7-1.63-1.54-1.63c-0.76,0-1.39,0.67-1.51,1.54l0.01,4.44c0,0.12-0.1,0.22-0.22,0.22h-1.34c-0.12,0-0.22-0.1-0.22-0.22V9.24c0-0.93-0.7-1.63-1.54-1.63c-0.81,0-1.47,0.75-1.52,1.71v4.27c0,0.12-0.1,0.22-0.22,0.22h-1.33c-0.12,0-0.22-0.1-0.22-0.22V6.44c0.01-0.12,0.1-0.21,0.22-0.21h1.33c0.12,0,0.21,0.1,0.22,0.21v0.63c0.48-0.65,1.24-1.04,2.06-1.05h0.03c1.04,0,1.99,0.57,2.48,1.48c0.43-0.9,1.33-1.48,2.32-1.49c1.54,0,2.79,1.19,2.76,2.65l0.01,4.91C34.37,13.7,34.27,13.8,34.15,13.81C34.15,13.81,34.15,13.81,34.15,13.81z M43.78,13.59c0,0.12-0.1,0.22-0.22,0.22h-1.33c-0.12,0-0.22-0.1-0.22-0.22v-0.71C41.34,13.6,40.4,14,39.42,14c-2.07,0-3.75-1.78-3.75-3.99s1.69-3.99,3.75-3.99c0.98,0,1.92,0.41,2.6,1.12v-0.7c0-0.12,0.1-0.22,0.22-0.22h1.33c0.11-0.01,0.21,0.08,0.22,0.2c0,0.01,0,0.01,0,0.02V13.59z M49.91,14c-0.98,0-1.92-0.41-2.6-1.12v3.78c0,0.12-0.1,0.22-0.22,0.22h-1.33c-0.12,0-0.22-0.1-0.22-0.22V6.45c0-0.12,0.1-0.21,0.22-0.21h1.33c0.12,0,0.22,0.1,0.22,0.22v0.7c0.68-0.72,1.62-1.12,2.6-1.12c2.07,0,3.75,1.77,3.75,3.98S51.98,14,49.91,14z M63.09,10.87C62.72,12.65,61.22,14,59.43,14c-0.98,0-1.92-0.41-2.6-1.12v0.7c0,0.12-0.1,0.22-0.22,0.22h-1.33c-0.12,0-0.22-0.1-0.22-0.22V3.37c0-0.12,0.1-0.22,0.22-0.22h1.33c0.12,0,0.22,0.1,0.22,0.22v3.78c0.68-0.71,1.62-1.12,2.6-1.11c1.79,0,3.29,1.33,3.66,3.12C63.21,9.73,63.21,10.31,63.09,10.87L63.09,10.87L63.09,10.87z M68.26,14.01c-1.9,0.01-3.55-1.29-3.97-3.14c-0.12-0.56-0.12-1.13,0-1.69c0.42-1.85,2.07-3.15,3.97-3.14c2.25,0,4.06,1.78,4.06,3.99S70.5,14.01,68.26,14.01L68.26,14.01z 
M79.09,13.81h-1.53c-0.12,0-0.23-0.06-0.29-0.16l-1.37-2.28l-1.37,2.28c-0.06,0.1-0.17,0.16-0.29,0.16h-1.53c-0.04,0-0.08-0.01-0.11-0.03c-0.09-0.06-0.12-0.18-0.06-0.27c0,0,0,0,0,0l2.31-3.5l-2.28-3.47c-0.02-0.03-0.03-0.07-0.03-0.11c0-0.11,0.09-0.2,0.2-0.2h1.53c0.12,0,0.23,0.06,0.29,0.16l1.34,2.25l1.34-2.25c0.06-0.1,0.17-0.16,0.29-0.16h1.53c0.04,0,0.08,0.01,0.11,0.03c0.09,0.06,0.12,0.18,0.06,0.27c0,0,0,0,0,0L76.96,10l2.31,3.5c0.02,0.03,0.03,0.07,0.03,0.11C79.29,13.72,79.2,13.81,79.09,13.81C79.09,13.81,79.09,13.81,79.09,13.81L79.09,13.81z"/><path class="st0" d="M10,1.21c-4.87,0-8.81,3.95-8.81,8.81s3.95,8.81,8.81,8.81s8.81-3.95,8.81-8.81C18.81,5.15,14.87,1.21,10,1.21z M14.18,12.19c-1.84,1.84-4.55,2.2-6.38,2.2c-0.67,0-1.34-0.05-2-0.15c0,0-0.97-5.37,2.04-8.39c0.79-0.79,1.86-1.22,2.98-1.22c1.21,0,2.37,0.49,3.23,1.35C15.8,7.73,15.85,10.5,14.18,12.19z"/><path class="st1" d="M10,0.02c-5.52,0-10,4.48-10,10s4.48,10,10,10s10-4.48,10-10C19.99,4.5,15.52,0.02,10,0.02z M10,18.83c-4.87,0-8.81-3.95-8.81-8.81S5.13,1.2,10,1.2s8.81,3.95,8.81,8.81C18.81,14.89,14.87,18.83,10,18.83z"/><path class="st1" d="M14.04,5.98c-1.75-1.75-4.53-1.81-6.2-0.14C4.83,8.86,5.8,14.23,5.8,14.23s5.37,0.97,8.39-2.04C15.85,10.5,15.8,7.73,14.04,5.98z M11.88,9.87l-0.87,1.78l-0.86-1.78L8.38,9.01l1.77-0.86l0.86-1.78l0.87,1.78l1.77,0.86L11.88,9.87z"/><polygon class="st0" points="13.65,9.01 11.88,9.87 11.01,11.65 10.15,9.87 8.38,9.01 10.15,8.15 11.01,6.37 11.88,8.15 "/></g></svg>); + background-repeat: no-repeat; + background-position: 0 0; + background-size: 65px 20px; +} + /* map icon alignment */ .map-icon > i.icon-wind:before, .map-icon > i.icon-wind:after { right: -16px !important; diff --git a/flexmeasures/ui/static/js/map-init.js b/flexmeasures/ui/static/js/map-init.js index a7c836855..9bf800f66 100644 --- a/flexmeasures/ui/static/js/map-init.js +++ b/flexmeasures/ui/static/js/map-init.js @@ -2,10 +2,12 @@ // Useful functions for our asset-specific Leaflet code function addTileLayer(leafletMap, mapboxAccessToken) { + /* + Add the tile layer for FlexMeasures. + Configure tile size, Mapbox API access and attribution. + */ var tileLayer = new L.tileLayer('https://api.mapbox.com/styles/v1/{id}/tiles/{z}/{x}/{y}?access_token={accessToken}', { - attribution: 'Map data © OpenStreetMap contributors, ' + - 'CC-BY-SA, ' + - 'Imagery © Mapbox', + attribution: '© Mapbox © OpenStreetMap Improve this map', tileSize: 512, maxZoom: 18, zoomOffset: -1, @@ -13,6 +15,10 @@ function addTileLayer(leafletMap, mapboxAccessToken) { accessToken: mapboxAccessToken }); tileLayer.addTo(leafletMap); + // add link for Mapbox logo (logo added via CSS) + $("#" + leafletMap._container.id).append( + '' + ); } @@ -23,4 +29,4 @@ function clickPan(e, data) { targetPoint = assetMap.project(targetLatLng, targetZoom).subtract([0, 50]), targetLatLng = assetMap.unproject(targetPoint, targetZoom); assetMap.setView(targetLatLng, targetZoom); -} +} \ No newline at end of file From 279a3be6558649614f769173bd87a3b83da9faa6 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 19 Jan 2022 14:56:53 +0100 Subject: [PATCH 45/46] Wrap up project 9 (#320) Update main changelog and documentation notes on data model transition. * Add missing database upgrade warning for v0.7.0 Signed-off-by: F.N. Claessen * Add changelog entry (infrastructure) for unit support Signed-off-by: F.N. 
Claessen * Add a changelog entry (infrastructure) for improving the scheduler for asymmetric device efficiencies and asymmetric EMS commitment prices (such as a different feed-in tariff) Signed-off-by: F.N. Claessen * Add changelog entry (new features) for fallback policy for Charge Points Signed-off-by: F.N. Claessen * Notes on data model transition: change order and add check marks Signed-off-by: F.N. Claessen * Notes on data model transition: add new projects and update project descriptions Signed-off-by: F.N. Claessen * Add changelog entry for Altair sensor charts, which have now become part of the UI for the first time (linked to from the assets page) Signed-off-by: F.N. Claessen * Add changelog entry for skipping auto-forecasting in case posted data does not represent a state change Signed-off-by: F.N. Claessen * Add changelog entry for fixing multi-sourced belief charts Signed-off-by: F.N. Claessen --- documentation/changelog.rst | 7 ++++++ .../dev/note-on-datamodel-transition.rst | 22 ++++++++++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/documentation/changelog.rst index 780769eed..7c97e523b 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -10,14 +10,17 @@ v0.8.0 | November XX, 2021 New features ----------- +* Bar charts of sensor data for individual sensors, which can be navigated using a calendar [see `PR #99 `_ and `PR #290 `_] * Charts with sensor data can be requested in one of the supported [`vega-lite themes `_] (incl. a dark theme) [see `PR #221 `_] * Mobile friendly (responsive) charts of sensor data, and such charts can be requested with a custom width and height [see `PR #313 `_] * Schedulers take into account round-trip efficiency if set [see `PR #291 `_] +* Fallback policies for charging schedules of batteries and Charge Points, in cases where the solver is presented with an infeasible problem [see `PR #267 `_ and `PR #270 `_] Bugfixes ----------- * Fix recording time of schedules triggered by UDI events [see `PR #300 `_] * Set bar width of bar charts based on sensor resolution [see `PR #310 `_] +* Fix bug in sensor data charts where data from multiple sources would be stacked, which incorrectly suggested that the data should be summed, whereas the data represents alternative beliefs [see `PR #228 `_] Infrastructure / Support ---------------------- @@ -31,6 +34,9 @@ Infrastructure / Support * Migrate all time series data to our new sensor data model based on the `timely beliefs `_ lib [see `PR #286 `_ and `project 9 `_] * Support the new asset model (which describes the organisational structure, rather than sensors and data) in UI and API. 
[see `PR #251 `_ and `PR #290 `_] * Internal search methods return most recent beliefs by default, also for charts, which can make them load a lot faster [see `PR #307 `_ and `PR #312 `_] +* Support unit conversion for posting sensor data [see `PR #283 `_ and `PR #293 `_] +* Improve the core device scheduler to support dealing with asymmetric efficiency losses of individual devices, and with asymmetric up and down prices for deviating from previous commitments (such as a different feed-in tariff) [see `PR #291 `_] +* Stop automatically triggering forecasting jobs when API calls save nothing new to the database, thereby saving redundant computation [see `PR #303 `_] v0.7.1 | November 08, 2021 @@ -44,6 +50,7 @@ Bugfixes v0.7.0 | October 26, 2021 =========================== +.. warning:: Upgrading to this version requires running ``flexmeasures db upgrade`` (you can create a backup first with ``flexmeasures db-ops dump``). .. warning:: The config setting ``FLEXMEASURES_PLUGIN_PATHS`` has been renamed to ``FLEXMEASURES_PLUGINS``. The old name still works but is deprecated. New features diff --git a/documentation/dev/note-on-datamodel-transition.rst b/documentation/dev/note-on-datamodel-transition.rst index 37d994ae7..2f1419689 100644 --- a/documentation/dev/note-on-datamodel-transition.rst +++ b/documentation/dev/note-on-datamodel-transition.rst @@ -1,3 +1,11 @@ +.. |check_| raw:: html + + + +.. |uncheck_| raw:: html + + + .. _note_on_datamodel_transition: A note on the ongoing data model transition @@ -52,12 +60,14 @@ We made `a technical roadmap on Github Projects `_: Our data model of beliefs about timed events, timely-beliefs, is being more tightly integrated into FlexMeasures. We do this so we can take advantage of timely-belief's capabilities more and increase the focus of FlexMeasures on features. -- `Scheduling of sensors `_: We are extending our database structure for Sensors with actuator functionality, and are moving to a model store where scheduling models can be registered. We do this so we can provide better plugin support for scheduling a diverse set of devices. -- `Forecasting of sensors `_: We are revising our forecasting tooling to support fixed-viewpoint forecasts. We do this so we can better support decision moments with the most recent expectations about relevant sensors. -- `Sensor relations and GeneralizedAssets with metadata `_: We are generalizing our database structure for organising energy data, to support all sorts of sensors and relationships between them. We do this so we can better support the diverse set of use cases for energy flexibility. -- `UI views for GeneralizedAssets `_: We are updating our UI views (dashboard maps and analytics charts) according to our new database structure for organising energy data. We do this so users can customize what they want to see. -- `Deprecate old database models `_: We are deprecating the Power, Price and Weather tables in favour of the TimedBelief table, and deprecating the Asset, Market and WeatherSensor tables in favour of the Sensor and GeneralizedAsset tables. We are doing this so users can move their data to the new database model. +- |check_| `Data model based on timely beliefs `_: Our data model of beliefs about timed events, timely-beliefs, is being more tightly integrated into FlexMeasures. We do this so we can take advantage of timely-belief's capabilities more and increase the focus of FlexMeasures on features. 
+- |check_| `Support Sensor and Asset diversity `_: We are generalizing our database structure for organising energy data, to support all sorts of sensors and assets, and are letting users move their data to the new database model. We do this so we can better support the diverse set of use cases for energy flexibility. +- |uncheck_| `Update API endpoints for time series communication `_: We are updating our API with new endpoints for communicating time series data, thereby consolidating a few older endpoints into a better standard. We do this so we can both simplify our API and documentation, and support a diversity of sensors. +- |uncheck_| `Update CLI commands for setting up Sensors and Assets `_: We are updating our CLI commands to reflect the new database structure. We do this to facilitate setting up structure for new users. +- |uncheck_| `Update UI views for Sensors and Assets `_: We are updating our UI views (dashboard maps and analytics charts) according to our new database structure for organising energy data. We do this so users can customize what they want to see. +- |uncheck_| `Scheduling of sensors `_: We are extending our database structure for Sensors with actuator functionality, and are moving to a model store where scheduling models can be registered. We do this so we can provide better plugin support for scheduling a diverse set of devices. +- |uncheck_| `Forecasting of sensors `_: We are revising our forecasting tooling to support fixed-viewpoint forecasts. We do this so we can better support decision moments with the most recent expectations about relevant sensors. +- |uncheck_| `Deprecate old database models `_: We are deprecating the Power, Price and Weather tables in favour of the TimedBelief table, and deprecating the Asset, Market and WeatherSensor tables in favour of the Sensor and GeneralizedAsset tables. We are doing this to clean up the code and database structure. The state of the transition (January 2022, v0.8.0) From 93e481adfc796569785f16566a3d42b88eecf4bc Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Wed, 19 Jan 2022 16:44:03 +0100 Subject: [PATCH 46/46] Deprecate portfolio and analytics views (#321) Unlink that which wasn't made compatible at this point with the new data model, and is therefore essentially broken. * Remove portfolio and analytics from default menu views Signed-off-by: F.N. Claessen * Unlink portfolio and analytics from UI documentation Signed-off-by: F.N. Claessen * Unlink control page from UI documentation Signed-off-by: F.N. Claessen * Comment out links to analytics page on dashboard Signed-off-by: F.N. Claessen * Comment out links to analytics page on asset pages Signed-off-by: F.N. Claessen * Changelog entry Signed-off-by: F.N. 
Claessen --- documentation/changelog.rst | 4 ++++ documentation/index.rst | 3 --- flexmeasures/ui/templates/crud/asset.html | 12 ++++++------ flexmeasures/ui/templates/crud/assets.html | 10 +++++----- flexmeasures/ui/templates/views/dashboard.html | 18 ++++++++++-------- .../ui/templates/views/new_dashboard.html | 18 ++++++++++-------- flexmeasures/utils/config_defaults.py | 2 -- 7 files changed, 35 insertions(+), 32 deletions(-) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index 7c97e523b..647e000c6 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -16,6 +16,10 @@ New features * Schedulers take into account round-trip efficiency if set [see `PR #291 `_] * Fallback policies for charging schedules of batteries and Charge Points, in cases where the solver is presented with an infeasible problem [see `PR #267 `_ and `PR #270 `_] +Deprecations +------------ +* The Portfolio and Analytics views are deprecated [see `PR #321 `_] + Bugfixes ----------- * Fix recording time of schedules triggered by UDI events [see `PR #300 `_] diff --git a/documentation/index.rst b/documentation/index.rst index b1912aa60..ace28d02b 100644 --- a/documentation/index.rst +++ b/documentation/index.rst @@ -89,9 +89,6 @@ The platform operator of FlexMeasures can be an Aggregator. :maxdepth: 1 views/dashboard - views/portfolio - views/control - views/analytics views/admin .. toctree:: diff --git a/flexmeasures/ui/templates/crud/asset.html b/flexmeasures/ui/templates/crud/asset.html index 5a19ef731..1d898342c 100644 --- a/flexmeasures/ui/templates/crud/asset.html +++ b/flexmeasures/ui/templates/crud/asset.html @@ -8,12 +8,12 @@
-[markup lost in extraction: links from the asset page to its analytics view]
+[markup lost in extraction: the same links, commented out]
 {% if user_is_admin %}
diff --git a/flexmeasures/ui/templates/crud/assets.html b/flexmeasures/ui/templates/crud/assets.html
index 4d1e6b8f9..23d3878c7 100644
--- a/flexmeasures/ui/templates/crud/assets.html
+++ b/flexmeasures/ui/templates/crud/assets.html
@@ -52,11 +52,11 @@
 All assets owned by account {{account.name}}
-[markup lost in extraction: per-asset links to the analytics view]
+[markup lost in extraction: the same links, commented out]
 {{ asset.sensors | length }}
diff --git a/flexmeasures/ui/templates/views/dashboard.html b/flexmeasures/ui/templates/views/dashboard.html
index 923d96f6f..2d55cfd3b 100644
--- a/flexmeasures/ui/templates/views/dashboard.html
+++ b/flexmeasures/ui/templates/views/dashboard.html
@@ -87,8 +87,10 @@
 {% if (FLEXMEASURES_MODE == "demo" and asset_groups[asset_group].count_all > 0) or (FLEXMEASURES_MODE != "demo" and asset_groups[asset_group].count > 0)%}
-[markup lost in extraction: link from each asset group to the analytics page]
+[markup lost in extraction: the same link, commented out]
 {% endif %}
 {% endfor %}
@@ -214,12 +216,12 @@
 {{ asset.display_name }}
-[markup lost in extraction: links from each asset to the analytics page]
+[markup lost in extraction: the same links, commented out]
diff --git a/flexmeasures/ui/templates/views/new_dashboard.html b/flexmeasures/ui/templates/views/new_dashboard.html
index b9e2e747b..e9609a3fb 100644
--- a/flexmeasures/ui/templates/views/new_dashboard.html
+++ b/flexmeasures/ui/templates/views/new_dashboard.html
@@ -24,8 +24,10 @@
 {# On demo, show all non-empty groups, otherwise show all groups that are non-empty for the current user #}
-[markup lost in extraction: link from each asset group to the analytics page]
+[markup lost in extraction: the same link, commented out]
 {% endfor %}
@@ -142,12 +144,12 @@
 {{ asset.name }}
-[markup lost in extraction: links from each asset to the analytics page]
+[markup lost in extraction: the same links, commented out]
diff --git a/flexmeasures/utils/config_defaults.py b/flexmeasures/utils/config_defaults.py
index aa122f163..4bf4ca727 100644
--- a/flexmeasures/utils/config_defaults.py
+++ b/flexmeasures/utils/config_defaults.py
@@ -102,8 +102,6 @@ class Config(object):
     FLEXMEASURES_ROOT_VIEW: Union[str, List[Union[str, Tuple[str, List[str]]]]] = []
     FLEXMEASURES_MENU_LISTED_VIEWS: List[Union[str, Tuple[str, List[str]]]] = [
         "dashboard",
-        "analytics",
-        "portfolio",
         "assets",
         "users",
     ]
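(Editor's note on the config change above: a deployment that still wants the deprecated views in its menu can override this default in its own FlexMeasures config file. A minimal sketch, assuming the standard uppercase-settings override mechanism; the config file itself is not part of this patch:)

    # Deployment config file -- hypothetical override, not part of this patch
    FLEXMEASURES_MENU_LISTED_VIEWS = [
        "dashboard",
        "analytics",  # re-list the deprecated analytics view
        "portfolio",  # re-list the deprecated portfolio view
        "assets",
        "users",
    ]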
[The remainder of this section is a fragment of further template diffs; markup and the enclosing patch header were lost in extraction.]

crud/assets.html, table header row (hunk header lost): the columns apparently change from
 Name | Location | Capacity | Resolution | Asset id | Owner id | Entity address | Old entity address (API v1)
to
 Name | Location | Asset id | Account id | Sensors
 {% if user_is_admin %}
@@ -36,30 +33,23 @@
 All assets
 {% for asset in assets: %}
- {{ asset.display_name }}
+ {{ asset.name }}
+ {% if asset.latitude and asset.longitude %}
  LAT: {{ "{:,.4f}".format( asset.latitude ) }} LONG: {{ "{:,.4f}".format( asset.longitude ) }}
- {{ "{:,.3f}".format( asset.capacity_in_mw ) }} MW
- {{ asset.event_resolution | naturalized_timedelta }}
+ {% endif %}
  {{ asset.id }}
- {{ asset.owner_id }}
- {{ asset.entity_address }}
+ {{ asset.account_id }}
- {{ asset.entity_address_fm0 }}
+ {{ asset.sensors | length }}
diff --git a/flexmeasures/ui/templates/crud/user.html b/flexmeasures/ui/templates/crud/user.html
index a64ccb6ff..f3811e82d 100644
--- a/flexmeasures/ui/templates/crud/user.html
+++ b/flexmeasures/ui/templates/crud/user.html
@@ -72,10 +72,10 @@
 Overview for user {{ user.username }}
- Assets owned
+ Assets in account
 [markup lost in extraction: the {{ asset_count }} cell, whose surrounding link also changes]
[dashboard template, markup lost in extraction: the loop variable asset_group is renamed to asset_group_name]
- {{ asset_group | capitalize }}
- class="text-center{% if asset_group in aggregate_groups %} agg-group{% endif %}"
+ {{ asset_group_name | capitalize }}
+ class="text-center{% if asset_group_name in aggregate_groups %} agg-group{% endif %}"
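(Editor's closing illustration for the access-policy change in patch 41/46: the "read" entry of GenericAsset.__acl__ falls back to the EVERY_LOGGED_IN_USER principal when the asset has no owning account. A minimal, self-contained sketch that restates just that logic; it is not an import of the real module:)

    EVERY_LOGGED_IN_USER = "every-logged-in-user"  # constant from flexmeasures/auth/policy.py

    def read_principals(account_id):
        # Mirrors GenericAsset.__acl__["read"]: public assets (account_id is None)
        # are readable by every logged-in user; otherwise only by the owning account.
        return f"account:{account_id}" if account_id is not None else EVERY_LOGGED_IN_USER

    assert read_principals(None) == "every-logged-in-user"
    assert read_principals(4) == "account:4"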