From 3f950d391691fc37fac0f620fad84d644199cd3e Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Oct 2022 11:39:08 +0200 Subject: [PATCH 01/24] Support unit conversion for python datetime and timedelta objects Signed-off-by: F.N. Claessen --- flexmeasures/utils/unit_utils.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index 360bad072..b1aa634bf 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -8,6 +8,7 @@ Time series with fixed resolution can be converted from units of flow to units of stock (such as 'kW' to 'kWh'), and vice versa. Percentages can be converted to units of some physical capacity if a capacity is known (such as '%' to 'kWh'). """ +from __future__ import annotations from datetime import timedelta from typing import List, Optional, Union @@ -215,6 +216,14 @@ def convert_units( capacity: Optional[str] = None, ) -> Union[pd.Series, List[Union[int, float]], int, float]: """Updates data values to reflect the given unit conversion.""" + if from_unit == "datetime": + return ( + pd.to_datetime(data) - pd.Timestamp("1970-01-01", tz="utc") + ) // pd.Timedelta("1s") + if from_unit == "timedelta": + if to_unit[0].isdigit(): + return data / pd.Timedelta(to_unit) + return data / pd.Timedelta(1, to_unit) if from_unit != to_unit: from_magnitudes = ( From 57a050af8c7a59cefeee735a21810fe3921c731f Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Oct 2022 11:46:00 +0200 Subject: [PATCH 02/24] Allow to filter by column when reading in beliefs from CSV Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_add.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index f4e4131f2..82d9e29c7 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -360,6 +360,18 @@ def add_initial_structure(): type=int, help="Column number with datetimes", ) +@click.option( + "--filter-column", + "filter_columns", + multiple=True, + help="Set a column number to filter data. Use together with --filter-value.", +) +@click.option( + "--filter-value", + "filter_values", + multiple=True, + help="Set a column value to filter data. Use together with --filter-column.", +) @click.option( "--delimiter", required=True, @@ -402,6 +414,8 @@ def add_beliefs( datecol: int = 0, valuecol: int = 1, beliefcol: Optional[int] = None, + filter_columns: List[int] = None, + filter_values: List[int] = None, delimiter: str = ",", decimal: str = ".", thousands: Optional[str] = None, @@ -451,6 +465,14 @@ def add_beliefs( elif beliefcol is None: kwargs["belief_time"] = server_now().astimezone(pytz.timezone(sensor.timezone)) + # Set up optional filters: + if len(filter_columns) != len(filter_values): + raise ValueError( + "The number of filter columns and filter values should be the same." + ) + filter_by_column = ( + dict(zip(filter_columns, filter_values)) if filter_columns else None + ) bdf = tb.read_csv( file, sensor, @@ -465,6 +487,7 @@ def add_beliefs( else [datecol, beliefcol, valuecol], parse_dates=True, na_values=na_values, + filter_by_column=filter_by_column, **kwargs, ) duplicate_rows = bdf.index.duplicated(keep="first") From e0040e7e34cbb235555f3a03e24a17c254806da3 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Oct 2022 11:49:41 +0200 Subject: [PATCH 03/24] Allow to set a timezone for reading in timezone naive data Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_add.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 82d9e29c7..c4064f75d 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -360,6 +360,12 @@ def add_initial_structure(): type=int, help="Column number with datetimes", ) +@click.option( + "--timezone", + required=False, + default="UTC", + help="timezone as string, e.g. 'UTC' or 'Europe/Amsterdam'", +) @click.option( "--filter-column", "filter_columns", @@ -414,6 +420,7 @@ def add_beliefs( datecol: int = 0, valuecol: int = 1, beliefcol: Optional[int] = None, + timezone: Optional[str] = None, filter_columns: List[int] = None, filter_values: List[int] = None, delimiter: str = ",", @@ -487,6 +494,7 @@ def add_beliefs( else [datecol, beliefcol, valuecol], parse_dates=True, na_values=na_values, + timezone=timezone, filter_by_column=filter_by_column, **kwargs, ) From d1e3e325cab3cae38292653a26344e1df27a2fcf Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Oct 2022 11:53:33 +0200 Subject: [PATCH 04/24] Allow throwing out NaN values when reading in beliefs Signed-off-by: F.N. Claessen --- flexmeasures/cli/data_add.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index c4064f75d..7e6ace166 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -334,6 +334,12 @@ def add_initial_structure(): multiple=True, help="Additional strings to recognize as NaN values. 
This argument can be given multiple times.", ) +@click.option( + "--keep-default-na", + default=False, + type=bool, + help="Whether or not to keep NaN values in the data.", +) @click.option( "--nrows", required=False, @@ -416,6 +422,7 @@ def add_beliefs( allow_overwrite: bool = False, skiprows: int = 1, na_values: List[str] = None, + keep_default_na: bool = False, nrows: Optional[int] = None, datecol: int = 0, valuecol: int = 1, @@ -494,6 +501,7 @@ def add_beliefs( else [datecol, beliefcol, valuecol], parse_dates=True, na_values=na_values, + keep_default_na=keep_default_na, timezone=timezone, filter_by_column=filter_by_column, **kwargs, From 2e2580a4401445972c919e9f0d3d26d4341158c1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 14 Oct 2022 12:32:16 +0200 Subject: [PATCH 05/24] Support datetime unit conversion for aware datetimes with mixed offset Signed-off-by: F.N. Claessen --- flexmeasures/utils/unit_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index b1aa634bf..7861eacbb 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -218,7 +218,7 @@ def convert_units( """Updates data values to reflect the given unit conversion.""" if from_unit == "datetime": return ( - pd.to_datetime(data) - pd.Timestamp("1970-01-01", tz="utc") + pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") ) // pd.Timedelta("1s") if from_unit == "timedelta": if to_unit[0].isdigit(): From d9ddd0abb91fe177fb7b33e6d720051d15b2a376 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 21 Oct 2022 10:39:18 +0200 Subject: [PATCH 06/24] Raise instead of assume UTC when reading in timezone naive data without a timezone set explicitly Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_add.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 7e6ace166..d95d3fafd 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -369,7 +369,7 @@ def add_initial_structure(): @click.option( "--timezone", required=False, - default="UTC", + default=None, help="timezone as string, e.g. 'UTC' or 'Europe/Amsterdam'", ) @click.option( From 7b7fdf85dc11b74f9e141044d777855e97d4f5f9 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Fri, 14 Oct 2022 12:32:16 +0200 Subject: [PATCH 07/24] Bump timely-beliefs dependency for read_csv Signed-off-by: F.N. Claessen --- requirements/app.in | 2 +- requirements/app.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/app.in b/requirements/app.in index 9c4649540..f8781f9d0 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -28,7 +28,7 @@ tldextract pyomo>=5.6 tabulate timetomodel>=0.7.1 -timely-beliefs>=1.12 +timely-beliefs>=1.13 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index 94f3bd22e..6d0b90ff1 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -321,7 +321,7 @@ tabulate==0.8.10 # via -r requirements/app.in threadpoolctl==3.1.0 # via scikit-learn -timely-beliefs==1.11.5 +timely-beliefs==1.13.0 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in From 26b21316766b1e94b79be9a0d61b6664c31faa7b Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 17:27:50 +0100 Subject: [PATCH 08/24] Refactor: flake8 Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_add.py | 19 ++++++++++++------- flexmeasures/utils/unit_utils.py | 23 ++++++++++++++++++----- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index d95d3fafd..2d9b2de1b 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -458,13 +458,7 @@ def add_beliefs( if sensor is None: print(f"Failed to create beliefs: no sensor found with ID {sensor_id}.") return - if source.isdigit(): - _source = get_source_or_none(int(source), source_type="CLI script") - if not _source: - print(f"Failed to find source {source}.") - return - else: - _source = get_or_create_source(source, source_type="CLI script") + _source = parse_source(source) # Set up optional parameters for read_csv if file.split(".")[-1].lower() == "csv": @@ -1131,3 +1125,14 @@ def check_errors(errors: Dict[str, List[str]]): f"Please correct the following errors:\n{errors}.\n Use the --help flag to learn more." 
) raise click.Abort + + +def parse_source(source): + if source.isdigit(): + _source = get_source_or_none(int(source), source_type="CLI script") + if not _source: + print(f"Failed to find source {source}.") + return + else: + _source = get_or_create_source(source, source_type="CLI script") + return _source diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index 7861eacbb..2d37ff4af 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -208,14 +208,15 @@ def is_energy_price_unit(unit: str) -> bool: return False -def convert_units( +def convert_time_units( data: Union[tb.BeliefsSeries, pd.Series, List[Union[int, float]], int, float], from_unit: str, to_unit: str, - event_resolution: Optional[timedelta] = None, - capacity: Optional[str] = None, -) -> Union[pd.Series, List[Union[int, float]], int, float]: - """Updates data values to reflect the given unit conversion.""" +): + """Convert data with datetime or timedelta dtypes to float values. + + Use Unix epoch or the requested time unit, respectively. + """ if from_unit == "datetime": return ( pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") @@ -225,6 +226,18 @@ def convert_units( return data / pd.Timedelta(to_unit) return data / pd.Timedelta(1, to_unit) + +def convert_units( + data: Union[tb.BeliefsSeries, pd.Series, List[Union[int, float]], int, float], + from_unit: str, + to_unit: str, + event_resolution: Optional[timedelta] = None, + capacity: Optional[str] = None, +) -> Union[pd.Series, List[Union[int, float]], int, float]: + """Updates data values to reflect the given unit conversion.""" + if from_unit in ("datetime", "timedelta"): + return convert_time_units(data, from_unit, to_unit) + if from_unit != to_unit: from_magnitudes = ( data.to_numpy() From 6ccb2dcc435b7db0866a804665c549db623128cd Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Mon, 31 Oct 2022 17:32:10 +0100 Subject: [PATCH 09/24] CLI changelog entry Signed-off-by: F.N. Claessen --- documentation/cli/change_log.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/documentation/cli/change_log.rst b/documentation/cli/change_log.rst index 574cc9b93..b45da13ab 100644 --- a/documentation/cli/change_log.rst +++ b/documentation/cli/change_log.rst @@ -4,6 +4,8 @@ FlexMeasures CLI Changelog ********************** +* Add options to ``flexmeasures add beliefs`` to 1) read CSV data with timezone naive datetimes (use ``--timezone`` to localize the data), 2) read CSV data with datetime/timedelta units (use ``--unit datetime`` or ``--unit timedelta``), 3) remove rows with NaN values, and 4) filter read-in data by matching values in specific columns (use ``--filter-column`` and ``--filter-value`` together). + since v0.11.0 | August 28, 2022 ============================== From e2eeb08e9ab2418c497c8c5e873602a684955d22 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 19:21:06 +0100 Subject: [PATCH 10/24] changelog entry Signed-off-by: F.N.
Claessen --- documentation/changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index dd56a80ae..5458d13dd 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -12,7 +12,7 @@ New features * Ability to provide your own custom scheduling function [see `PR #505 `_] * Visually distinguish forecasts/schedules (dashed lines) from measurements (solid lines), and expand the tooltip with timing info regarding the forecast/schedule horizon or measurement lag [see `PR #503 `_] * The asset page also allows to show sensor data from other assets that belong to the same account [see `PR #500 `_] -* Improved import of time series data from CSV file: 1) drop duplicate records with warning, and 2) allow configuring which column contains explicit recording times for each data point (use case: import forecasts) [see `PR #501 `_] +* Improved import of time series data from CSV file: 1) drop duplicate records with warning, 2) allow configuring which column contains explicit recording times for each data point (use case: import forecasts) [see `PR #501 `_], 3) localize timezone naive data, 4) support reading in datetime and timedelta values, 5) remove rows with NaN values, and 6) filter by values in specific columns [see `PR #521 `_] Bugfixes ----------- From 994af8a287b5c2a8cf1bc859f61b80dbfcdfce17 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 19:25:00 +0100 Subject: [PATCH 11/24] mypy Signed-off-by: F.N. 
Claessen --- flexmeasures/cli/data_add.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 2d9b2de1b..d0735f73b 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -415,6 +415,8 @@ def add_beliefs( file: str, sensor_id: int, source: str, + filter_columns: List[int], + filter_values: List[int], unit: Optional[str] = None, horizon: Optional[int] = None, cp: Optional[float] = None, @@ -428,8 +430,6 @@ def add_beliefs( valuecol: int = 1, beliefcol: Optional[int] = None, timezone: Optional[str] = None, - filter_columns: List[int] = None, - filter_values: List[int] = None, delimiter: str = ",", decimal: str = ".", thousands: Optional[str] = None, From 401bc1d183c52476e2555dc3bb6e259019f15970 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 19:31:37 +0100 Subject: [PATCH 12/24] Use sensor id field validation Signed-off-by: F.N. Claessen --- flexmeasures/cli/data_add.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index d0735f73b..da44d125e 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -279,8 +279,9 @@ def add_initial_structure(): @click.argument("file", type=click.Path(exists=True)) @click.option( "--sensor-id", + "sensor", required=True, - type=click.IntRange(min=1), + type=SensorIdField(), help="Sensor to which the beliefs pertain.", ) @click.option( @@ -413,7 +414,7 @@ def add_initial_structure(): ) def add_beliefs( file: str, - sensor_id: int, + sensor: Sensor, source: str, filter_columns: List[int], filter_values: List[int], @@ -454,10 +455,6 @@ def add_beliefs( In case no --horizon is specified and no beliefcol is specified, the moment of executing this CLI command is taken as the time at which the beliefs were recorded. 
""" - sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none() - if sensor is None: - print(f"Failed to create beliefs: no sensor found with ID {sensor_id}.") - return _source = parse_source(source) # Set up optional parameters for read_csv From 6e14acf95f2c766d1a51d3cc27ac6e0c527ef7ff Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Sun, 9 Oct 2022 11:41:34 +0200 Subject: [PATCH 13/24] Querying for a source with a given id no longer requires knowing the source type Signed-off-by: F.N. Claessen --- flexmeasures/cli/data_add.py | 2 +- flexmeasures/data/queries/data_sources.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index da44d125e..9be5d4831 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -1126,7 +1126,7 @@ def check_errors(errors: Dict[str, List[str]]): def parse_source(source): if source.isdigit(): - _source = get_source_or_none(int(source), source_type="CLI script") + _source = get_source_or_none(int(source)) if not _source: print(f"Failed to find source {source}.") return diff --git a/flexmeasures/data/queries/data_sources.py b/flexmeasures/data/queries/data_sources.py index d3daded86..178891a34 100644 --- a/flexmeasures/data/queries/data_sources.py +++ b/flexmeasures/data/queries/data_sources.py @@ -42,7 +42,15 @@ def get_or_create_source( return _source -def get_source_or_none(source: int, source_type: str) -> Optional[DataSource]: - query = DataSource.query.filter(DataSource.type == source_type) +def get_source_or_none( + source: int | str, source_type: str | None = None +) -> DataSource | None: + """ + :param source: source id + :param source_type: optionally, filter by source type + """ + query = DataSource.query + if source_type is not None: + query = query.filter(DataSource.type == source_type) query = query.filter(DataSource.id == int(source)) return query.one_or_none() From 
9b018dcd801955c970ec52aebdd48d045b09c8e9 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 19:59:24 +0100 Subject: [PATCH 14/24] make freeze-deps Signed-off-by: F.N. Claessen --- requirements/app.txt | 40 ++++------------------------------------ requirements/docs.txt | 6 +----- requirements/test.txt | 10 ++-------- 3 files changed, 7 insertions(+), 49 deletions(-) diff --git a/requirements/app.txt b/requirements/app.txt index 6d0b90ff1..bdd5b22ea 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -7,20 +7,11 @@ alembic==1.8.1 # via flask-migrate altair==4.2.0 - # via - # -r requirements/app.in - # timely-beliefs + # via -r requirements/app.in arrow==1.2.2 # via rq-dashboard -async-generator==1.10 - # via - # trio - # trio-websocket attrs==22.1.0 - # via - # jsonschema - # outcome - # trio + # via jsonschema babel==2.10.3 # via py-moneyed bcrypt==4.0.0 @@ -34,7 +25,6 @@ blinker==1.5 certifi==2022.6.15 # via # requests - # selenium # sentry-sdk charset-normalizer==2.1.1 # via requests @@ -114,8 +104,6 @@ fonttools==4.37.1 # via matplotlib greenlet==1.1.3 # via sqlalchemy -h11==0.13.0 - # via wsproto humanize==4.3.0 # via -r requirements/app.in idna==3.3 @@ -123,7 +111,6 @@ idna==3.3 # email-validator # requests # tldextract - # trio importlib-metadata==4.12.0 # via # -r requirements/app.in @@ -192,8 +179,6 @@ numpy==1.23.2 # uniplot openturns==1.19.post1 # via timely-beliefs -outcome==1.2.0 - # via trio packaging==21.3 # via # marshmallow @@ -202,7 +187,7 @@ packaging==21.3 # redis # statsmodels # webargs -pandas==1.2.5 +pandas==1.5.1 # via # -r requirements/app.in # altair @@ -245,8 +230,6 @@ pyparsing==3.0.9 # packaging pyrsistent==0.18.1 # via jsonschema -pysocks==1.7.1 - # via urllib3 python-dateutil==2.8.2 # via # arrow @@ -289,8 +272,6 @@ scipy==1.9.1 # statsmodels # timely-beliefs # timetomodel -selenium==4.4.3 - # via timely-beliefs sentry-sdk[flask]==1.9.5 # via -r requirements/app.in six==1.16.0 @@ -303,10 +284,6 @@ 
six==1.16.0 # requests-file sklearn==0.0 # via timetomodel -sniffio==1.2.0 - # via trio -sortedcontainers==2.4.0 - # via trio sqlalchemy==1.4.40 # via # -r requirements/app.in @@ -329,22 +306,15 @@ tldextract==3.3.1 # via -r requirements/app.in toolz==0.12.0 # via altair -trio==0.21.0 - # via - # selenium - # trio-websocket -trio-websocket==0.9.2 - # via selenium typing-extensions==4.3.0 # via # py-moneyed # pydantic uniplot==0.7.0 # via -r requirements/app.in -urllib3[socks]==1.26.12 +urllib3==1.26.12 # via # requests - # selenium # sentry-sdk webargs==8.2.0 # via -r requirements/app.in @@ -356,8 +326,6 @@ workalendar==16.3.0 # via -r requirements/app.in wrapt==1.14.1 # via deprecated -wsproto==1.2.0 - # via trio-websocket wtforms==3.0.1 # via flask-wtf xlrd==2.0.1 diff --git a/requirements/docs.txt b/requirements/docs.txt index f5266a513..4d7187434 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -53,10 +53,6 @@ pyparsing==3.0.9 # via # -c requirements/app.txt # packaging -pysocks==1.7.1 - # via - # -c requirements/app.txt - # urllib3 pytz==2022.2.1 # via # -c requirements/app.txt @@ -101,7 +97,7 @@ sphinxcontrib-qthelp==1.0.3 # via sphinx sphinxcontrib-serializinghtml==1.1.5 # via sphinx -urllib3[socks]==1.26.12 +urllib3==1.26.12 # via # -c requirements/app.txt # requests diff --git a/requirements/test.txt b/requirements/test.txt index 0325d220e..64f5a373a 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -70,10 +70,6 @@ pyparsing==3.0.9 # via # -c requirements/app.txt # packaging -pysocks==1.7.1 - # via - # -c requirements/app.txt - # urllib3 pytest==7.1.2 # via # -r requirements/test.in @@ -103,16 +99,14 @@ six==1.16.0 # fakeredis # requests-mock sortedcontainers==2.4.0 - # via - # -c requirements/app.txt - # fakeredis + # via fakeredis termcolor==1.1.0 # via pytest-sugar tomli==2.0.1 # via # coverage # pytest -urllib3[socks]==1.26.12 +urllib3==1.26.12 # via # -c requirements/app.txt # requests From 
b7ad2d677e4d226a459c3cdb022358a0f71d9b54 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Mon, 31 Oct 2022 20:08:23 +0100 Subject: [PATCH 15/24] add optional dependency: timely-beliefs[forecast] Signed-off-by: F.N. Claessen --- requirements/app.in | 2 +- requirements/app.txt | 31 +++++++++++++++++++++++++------ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/requirements/app.in b/requirements/app.in index f8781f9d0..82e15f9ec 100644 --- a/requirements/app.in +++ b/requirements/app.in @@ -28,7 +28,7 @@ tldextract pyomo>=5.6 tabulate timetomodel>=0.7.1 -timely-beliefs>=1.13 +timely-beliefs[forecast]>=1.13 python-dotenv # a backport, not needed in Python3.8 importlib_metadata diff --git a/requirements/app.txt b/requirements/app.txt index bdd5b22ea..71df651f4 100644 --- a/requirements/app.txt +++ b/requirements/app.txt @@ -40,7 +40,9 @@ convertdate==2.4.0 cycler==0.11.0 # via matplotlib deprecated==1.2.13 - # via redis + # via + # redis + # sktime dill==0.3.5.1 # via openturns dnspython==2.2.1 @@ -141,6 +143,8 @@ jsonschema==4.15.0 # via altair kiwisolver==1.4.4 # via matplotlib +llvmlite==0.39.1 + # via numba lunardate==0.2.0 # via workalendar mako==1.2.2 @@ -163,16 +167,20 @@ marshmallow-sqlalchemy==0.28.1 # via -r requirements/app.in matplotlib==3.5.3 # via timetomodel -numpy==1.23.2 +numba==0.56.3 + # via sktime +numpy==1.22.4 # via # -r requirements/app.in # altair # matplotlib + # numba # pandas # patsy # properscoring # scikit-learn # scipy + # sktime # statsmodels # timely-beliefs # timetomodel @@ -191,6 +199,7 @@ pandas==1.5.1 # via # -r requirements/app.in # altair + # sktime # statsmodels # timely-beliefs # timetomodel @@ -264,11 +273,14 @@ rq==1.11.0 rq-dashboard==0.6.1 # via -r requirements/app.in scikit-learn==1.1.2 - # via sklearn -scipy==1.9.1 + # via + # sklearn + # sktime +scipy==1.8.1 # via # properscoring # scikit-learn + # sktime # statsmodels # timely-beliefs # timetomodel @@ -284,6 +296,8 @@ six==1.16.0 # requests-file 
sklearn==0.0 # via timetomodel +sktime==0.13.4 + # via timely-beliefs sqlalchemy==1.4.40 # via # -r requirements/app.in @@ -293,12 +307,14 @@ sqlalchemy==1.4.40 # timely-beliefs # timetomodel statsmodels==0.13.2 - # via timetomodel + # via + # sktime + # timetomodel tabulate==0.8.10 # via -r requirements/app.in threadpoolctl==3.1.0 # via scikit-learn -timely-beliefs==1.13.0 +timely-beliefs[forecast]==1.13.0 # via -r requirements/app.in timetomodel==0.7.1 # via -r requirements/app.in @@ -332,3 +348,6 @@ xlrd==2.0.1 # via -r requirements/app.in zipp==3.8.1 # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools From d1acfe5160df59cf32c8fcd6a2bde7d5485e4a7a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 14:11:36 +0100 Subject: [PATCH 16/24] Mention data conversion from 'datetime' or 'timedelta' units Signed-off-by: F.N. Claessen --- flexmeasures/cli/data_add.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 751b5e205..464f5c68c 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -295,6 +295,8 @@ def add_initial_structure(): required=False, type=str, help="Unit of the data, for conversion to the sensor unit, if possible (a string unit such as 'kW' or 'm³/h').\n" + "Measurements of time itself that are formatted as a 'datetime' or 'timedelta' can be converted to a sensor unit representing time (such as 's' or 'h'),\n" + "where datetimes are represented as a duration with respect to the UNIX epoch." "Hint: to switch the sign of the data, prepend a minus sign.\n" "For example, when assigning kW consumption data to a kW production sensor, use '-kW'.", ) From 74751fd2490ee40e427d1b118e0a73410d817054 Mon Sep 17 00:00:00 2001 From: "F.N. 
Claessen" Date: Thu, 10 Nov 2022 14:12:45 +0100 Subject: [PATCH 17/24] Allow converting 'datetime' values to a duration other than seconds (since UNIX epoch) Signed-off-by: F.N. Claessen --- flexmeasures/utils/unit_utils.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index 2d37ff4af..bf2abe23a 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -218,9 +218,14 @@ def convert_time_units( Use Unix epoch or the requested time unit, respectively. """ if from_unit == "datetime": - return ( - pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") - ) // pd.Timedelta("1s") + if to_unit[0].isdigit(): + return ( + pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") + ) // pd.Timedelta(to_unit) + else: + return ( + pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") + ) // pd.Timedelta(1, to_unit) if from_unit == "timedelta": if to_unit[0].isdigit(): return data / pd.Timedelta(to_unit) From d1a8392ca56077c8e609c9d5d3b511f8feedc75a Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 14:16:28 +0100 Subject: [PATCH 18/24] Refactor and make convert_time_units a private function Signed-off-by: F.N. Claessen --- flexmeasures/utils/unit_utils.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index bf2abe23a..e05889d26 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -208,7 +208,7 @@ def is_energy_price_unit(unit: str) -> bool: return False -def convert_time_units( +def _convert_time_units( data: Union[tb.BeliefsSeries, pd.Series, List[Union[int, float]], int, float], from_unit: str, to_unit: str, @@ -217,19 +217,16 @@ def convert_time_units( Use Unix epoch or the requested time unit, respectively. 
""" + if to_unit[0].isdigit(): + to_unit = pd.Timedelta(to_unit) + else: + to_unit = pd.Timedelta(1, to_unit) if from_unit == "datetime": - if to_unit[0].isdigit(): - return ( - pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") - ) // pd.Timedelta(to_unit) - else: - return ( - pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") - ) // pd.Timedelta(1, to_unit) - if from_unit == "timedelta": - if to_unit[0].isdigit(): - return data / pd.Timedelta(to_unit) - return data / pd.Timedelta(1, to_unit) + return ( + pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") + ) // to_unit + else: + return data / to_unit def convert_units( @@ -241,7 +238,7 @@ def convert_units( ) -> Union[pd.Series, List[Union[int, float]], int, float]: """Updates data values to reflect the given unit conversion.""" if from_unit in ("datetime", "timedelta"): - return convert_time_units(data, from_unit, to_unit) + return _convert_time_units(data, from_unit, to_unit) if from_unit != to_unit: from_magnitudes = ( From e77803e157ffd8bed4dcb20462faac1085437a16 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 14:28:38 +0100 Subject: [PATCH 19/24] Refactor and add inline comment explaining why we check to_unit for a digit Signed-off-by: F.N. Claessen --- flexmeasures/utils/unit_utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/flexmeasures/utils/unit_utils.py b/flexmeasures/utils/unit_utils.py index e05889d26..23bbd8879 100644 --- a/flexmeasures/utils/unit_utils.py +++ b/flexmeasures/utils/unit_utils.py @@ -217,16 +217,15 @@ def _convert_time_units( Use Unix epoch or the requested time unit, respectively. 
""" - if to_unit[0].isdigit(): - to_unit = pd.Timedelta(to_unit) - else: - to_unit = pd.Timedelta(1, to_unit) + if not to_unit[0].isdigit(): + # unit abbreviations passed to pd.Timedelta need a number (so, for example, h becomes 1h) + to_unit = f"1{to_unit}" if from_unit == "datetime": return ( pd.to_datetime(data, utc=True) - pd.Timestamp("1970-01-01", tz="utc") - ) // to_unit + ) // pd.Timedelta(to_unit) else: - return data / to_unit + return data / pd.Timedelta(to_unit) def convert_units( From b3f700a5733e645eabe1adcfbb25973bbf2fede1 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 12:12:04 +0100 Subject: [PATCH 20/24] mypy: PEP 484 prohibits implicit Optional Signed-off-by: F.N. Claessen --- flexmeasures/api/common/utils/api_utils.py | 12 +++++++----- flexmeasures/api/common/utils/validators.py | 6 ++++-- flexmeasures/api/v1/implementations.py | 8 ++++++-- flexmeasures/cli/data_add.py | 3 ++- flexmeasures/data/queries/annotations.py | 4 +++- flexmeasures/ui/crud/users.py | 4 +++- 6 files changed, 25 insertions(+), 12 deletions(-) diff --git a/flexmeasures/api/common/utils/api_utils.py b/flexmeasures/api/common/utils/api_utils.py index d92c6e017..7d36299f0 100644 --- a/flexmeasures/api/common/utils/api_utils.py +++ b/flexmeasures/api/common/utils/api_utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from timely_beliefs.beliefs.classes import BeliefsDataFrame from typing import List, Sequence, Tuple, Union import copy @@ -54,8 +56,8 @@ def contains_empty_items(groups: List[List[str]]): def parse_as_list( - connection: Union[Sequence[Union[str, float]], str, float], of_type: type = None -) -> Sequence[Union[str, float, None]]: + connection: str | float | Sequence[str | float], of_type: type | None = None +) -> Sequence[str | float | None]: """ Return a list of connections (or values), even if it's just one connection (or value) """ @@ -141,7 +143,7 @@ def groups_to_dict( connection_groups: List[str], value_groups: List[List[str]], 
generic_asset_type_name: str, - plural_name: str = None, + plural_name: str | None = None, groups_name="groups", ) -> dict: """Put the connections and values in a dictionary and simplify if groups have identical values and/or if there is @@ -343,7 +345,7 @@ def get_sensor_by_generic_asset_type_and_location( def enqueue_forecasting_jobs( - forecasting_jobs: List[Job] = None, + forecasting_jobs: list[Job] | None = None, ): """Enqueue forecasting jobs. @@ -355,7 +357,7 @@ def enqueue_forecasting_jobs( def save_and_enqueue( data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], - forecasting_jobs: List[Job] = None, + forecasting_jobs: list[Job] | None = None, save_changed_beliefs_only: bool = True, ) -> ResponseTuple: diff --git a/flexmeasures/api/common/utils/validators.py b/flexmeasures/api/common/utils/validators.py index 7df1ce975..50ce9f22c 100644 --- a/flexmeasures/api/common/utils/validators.py +++ b/flexmeasures/api/common/utils/validators.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, timedelta from functools import wraps from typing import List, Tuple, Union, Optional @@ -228,7 +230,7 @@ def decorated_service(*args, **kwargs): def optional_user_sources_accepted( - default_source: Union[int, str, List[Union[int, str]]] = None + default_source: int | str | list[int | str] | None = None, ): """Decorator which specifies that a GET or POST request accepts an optional source or list of data sources. It parses relevant form data and sets the "user_source_ids" keyword parameter. @@ -539,7 +541,7 @@ def wrapper(*args, **kwargs): def assets_required( - generic_asset_type_name: str, plural_name: str = None, groups_name="groups" + generic_asset_type_name: str, plural_name: str | None = None, groups_name="groups" ): """Decorator which specifies that a GET or POST request must specify one or more assets. It parses relevant form data and sets the "generic_asset_name_groups" keyword param. 
diff --git a/flexmeasures/api/v1/implementations.py b/flexmeasures/api/v1/implementations.py index 864e51f54..669ca94fb 100644 --- a/flexmeasures/api/v1/implementations.py +++ b/flexmeasures/api/v1/implementations.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import isodate from typing import Dict, List, Optional, Tuple, Union from datetime import datetime as datetime_type, timedelta @@ -154,8 +156,10 @@ def collect_connection_and_value_groups( start: datetime_type, duration: timedelta, connection_groups: List[List[str]], - user_source_ids: Union[int, List[int]] = None, # None is interpreted as all sources - source_types: List[str] = None, + user_source_ids: int + | list[int] + | None = None, # None is interpreted as all sources + source_types: list[str] | None = None, ) -> Tuple[dict, int]: """ Code for GETting power values from the API. diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 464f5c68c..5ed0bf52d 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -1,4 +1,5 @@ """CLI Tasks for populating the database - most useful in development""" +from __future__ import annotations from datetime import datetime, timedelta from typing import Dict, List, Optional, Tuple @@ -426,7 +427,7 @@ def add_beliefs( resample: bool = True, allow_overwrite: bool = False, skiprows: int = 1, - na_values: List[str] = None, + na_values: list[str] | None = None, keep_default_na: bool = False, nrows: Optional[int] = None, datecol: int = 0, diff --git a/flexmeasures/data/queries/annotations.py b/flexmeasures/data/queries/annotations.py index a9b344a8b..dff7ef85f 100644 --- a/flexmeasures/data/queries/annotations.py +++ b/flexmeasures/data/queries/annotations.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime from typing import List, Optional @@ -15,7 +17,7 @@ def query_asset_annotations( annotations_after: Optional[datetime] = None, annotations_before: Optional[datetime] = None, 
sources: Optional[List[DataSource]] = None, - annotation_type: str = None, + annotation_type: str | None = None, ) -> Query: """Match annotations assigned to the given asset.""" query = Annotation.query.join(GenericAssetAnnotationRelationship).filter( diff --git a/flexmeasures/ui/crud/users.py b/flexmeasures/ui/crud/users.py index 8a39ec97c..b09adb934 100644 --- a/flexmeasures/ui/crud/users.py +++ b/flexmeasures/ui/crud/users.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional, Union from datetime import datetime @@ -36,7 +38,7 @@ class UserForm(FlaskForm): active = BooleanField("Activation Status", validators=[DataRequired()]) -def render_user(user: Optional[User], asset_count: int = 0, msg: str = None): +def render_user(user: User | None, asset_count: int = 0, msg: str | None = None): user_form = UserForm() user_form.process(obj=user) return render_flexmeasures_template( From 6041f4fc617f4fe985e30ec06e6178ba2e310953 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 17:55:00 +0100 Subject: [PATCH 21/24] Attempt to revert bugs introduced in merge with main Signed-off-by: F.N. 
Claessen --- documentation/changelog.rst | 12 +++ flexmeasures/api/v1_3/tests/test_api_v1_3.py | 4 +- .../api/v2_0/tests/test_api_v2_0_assets.py | 2 +- .../api/v3_0/tests/test_sensor_schedules.py | 4 +- flexmeasures/cli/data_add.py | 12 ++- flexmeasures/cli/data_delete.py | 16 ++-- flexmeasures/cli/db_ops.py | 10 +-- .../data/models/planning/charging_station.py | 7 +- flexmeasures/data/models/planning/solver.py | 16 ++-- .../data/models/planning/tests/test_solver.py | 8 +- flexmeasures/data/models/user.py | 2 +- flexmeasures/data/schemas/__init__.py | 1 + flexmeasures/data/schemas/assets.py | 87 ++++++++++++++++++- flexmeasures/data/schemas/generic_assets.py | 33 +------ .../schemas/tests/test_latitude_longitudes.py | 84 ++++++++++++++++++ flexmeasures/data/utils.py | 16 ++-- flexmeasures/ui/crud/assets.py | 12 ++- flexmeasures/utils/calculations.py | 31 +++++-- flexmeasures/utils/coding_utils.py | 15 ++++ 19 files changed, 289 insertions(+), 83 deletions(-) create mode 100644 flexmeasures/data/schemas/tests/test_latitude_longitudes.py diff --git a/documentation/changelog.rst b/documentation/changelog.rst index b385a4a21..d28a3899f 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -18,14 +18,26 @@ New features Bugfixes ----------- * The CLI command ``flexmeasures show beliefs`` now supports plotting time series data that includes NaN values, and provides better support for plotting multiple sensors that do not share the same unit [see `PR #516 `_] +* Consistent CLI/UI support for asset lat/lng positions up to 7 decimal places (previously the UI rounded to 4 decimal places, whereas the CLI allowed more than 4) [see `PR #522 `_] Infrastructure / Support ---------------------- * Reduce size of Docker image (from 2GB to 1.4GB) [see `PR #512 `_] * Remove bokeh dependency and obsolete UI views [see `PR #476 `_] +* Fix ``flexmeasures db-ops dump`` and ``flexmeasures db-ops restore`` incorrectly reporting a success when `pg_dump` and 
`pg_restore` are not installed [see `PR #526 `_] +* Plugins can save BeliefsSeries, too, instead of just BeliefsDataFrames [see `PR #523 `_] +v0.11.3 | November 2, 2022 +============================ + +Bugfixes +----------- +* Fix scheduling with imperfect efficiencies, which resulted in exceeding the device's lower SoC limit. [see `PR #520 `_] +* Fix scheduler for Charge Points when taking into account inflexible devices [see `PR #517 `_] +* Prevent rounding asset lat/long positions to 4 decimal places when editing an asset in the UI [see `PR #522 `_] + v0.11.2 | September 6, 2022 ============================ diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index 54429774a..b771cea01 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -112,7 +112,9 @@ def test_post_udi_event_and_get_device_message( # check targets, if applicable if "targets" in message: start_soc = message["value"] / 1000 # in MWh - soc_schedule = integrate_time_series(consumption_schedule, start_soc, 6) + soc_schedule = integrate_time_series( + consumption_schedule, start_soc, decimal_precision=6, + ) print(consumption_schedule) print(soc_schedule) for target in message["targets"]: diff --git a/flexmeasures/api/v2_0/tests/test_api_v2_0_assets.py b/flexmeasures/api/v2_0/tests/test_api_v2_0_assets.py index 7d1a84df6..0a8c445ec 100644 --- a/flexmeasures/api/v2_0/tests/test_api_v2_0_assets.py +++ b/flexmeasures/api/v2_0/tests/test_api_v2_0_assets.py @@ -248,7 +248,7 @@ def test_post_an_asset_with_invalid_data(client, db): in post_asset_response.json["message"]["json"]["capacity_in_mw"][0] ) assert ( - "greater than or equal to -180 and less than or equal to 180" + "Longitude 300.9 exceeds the maximum longitude of 180 degrees." 
in post_asset_response.json["message"]["json"]["longitude"][0] ) assert "required field" in post_asset_response.json["message"]["json"]["unit"][0] diff --git a/flexmeasures/api/v3_0/tests/test_sensor_schedules.py b/flexmeasures/api/v3_0/tests/test_sensor_schedules.py index 9c9ac912a..793b17abd 100644 --- a/flexmeasures/api/v3_0/tests/test_sensor_schedules.py +++ b/flexmeasures/api/v3_0/tests/test_sensor_schedules.py @@ -90,7 +90,9 @@ def test_trigger_and_get_schedule( # check targets, if applicable if "targets" in message: start_soc = message["soc-at-start"] / 1000 # in MWh - soc_schedule = integrate_time_series(consumption_schedule, start_soc, 6) + soc_schedule = integrate_time_series( + consumption_schedule, start_soc, decimal_precision=6, + ) print(consumption_schedule) print(soc_schedule) for target in message["targets"]: diff --git a/flexmeasures/cli/data_add.py b/flexmeasures/cli/data_add.py index 5ed0bf52d..15d00dfe5 100755 --- a/flexmeasures/cli/data_add.py +++ b/flexmeasures/cli/data_add.py @@ -38,7 +38,13 @@ MissingAttributeException, ) from flexmeasures.data.models.annotations import Annotation, get_or_create_annotation -from flexmeasures.data.schemas import AwareDateTimeField, DurationField, SensorIdField +from flexmeasures.data.schemas import ( + AwareDateTimeField, + DurationField, + LatitudeField, + LongitudeField, + SensorIdField, +) from flexmeasures.data.schemas.sensors import SensorSchema from flexmeasures.data.schemas.units import QuantityField from flexmeasures.data.schemas.generic_assets import ( @@ -242,12 +248,12 @@ def add_asset_type(**args): @click.option("--name", required=True) @click.option( "--latitude", - type=float, + type=LatitudeField(), help="Latitude of the asset's location", ) @click.option( "--longitude", - type=float, + type=LongitudeField(), help="Longitude of the asset's location", ) @click.option("--account-id", type=int, required=True) diff --git a/flexmeasures/cli/data_delete.py b/flexmeasures/cli/data_delete.py index 
0a65d6a7d..f99b0ff28 100644 --- a/flexmeasures/cli/data_delete.py +++ b/flexmeasures/cli/data_delete.py @@ -12,6 +12,7 @@ from flexmeasures.data.models.generic_assets import GenericAsset from flexmeasures.data.models.time_series import Sensor, TimedBelief from flexmeasures.data.schemas.generic_assets import GenericAssetIdField +from flexmeasures.data.schemas.sensors import SensorIdField from flexmeasures.data.services.users import find_user_by_email, delete_user @@ -123,7 +124,7 @@ def delete_asset_and_data(asset: GenericAsset, force: bool): Delete an asset & also its sensors and data. """ if not force: - prompt = f"Delete {asset}, including all its sensors and data?" + prompt = f"Delete {asset.__repr__()}, including all its sensors and data?" click.confirm(prompt, abort=True) db.session.delete(asset) db.session.commit() @@ -295,21 +296,18 @@ def delete_nan_beliefs(sensor_id: Optional[int] = None): @with_appcontext @click.option( "--id", - "sensor_id", - type=int, + "sensor", + type=SensorIdField(), required=True, help="Delete a single sensor and its (time series) data. 
Follow up with the sensor's ID.", ) def delete_sensor( - sensor_id: int, + sensor: Sensor, ): """Delete a sensor and all beliefs about it.""" - sensor = Sensor.query.get(sensor_id) - n = TimedBelief.query.filter(TimedBelief.sensor_id == sensor_id).delete() + n = TimedBelief.query.filter(TimedBelief.sensor_id == sensor.id).delete() db.session.delete(sensor) - click.confirm( - f"Really delete sensor {sensor_id}, along with {n} beliefs?", abort=True - ) + click.confirm(f"Delete {sensor.__repr__()}, along with {n} beliefs?", abort=True) db.session.commit() diff --git a/flexmeasures/cli/db_ops.py b/flexmeasures/cli/db_ops.py index a9de8da8a..fec96bbb0 100644 --- a/flexmeasures/cli/db_ops.py +++ b/flexmeasures/cli/db_ops.py @@ -95,10 +95,7 @@ def dump(): dump_filename = f"pgbackup_{db_name}_{time_of_saving}.dump" command_for_dumping = f"pg_dump --no-privileges --no-owner --data-only --format=c --file={dump_filename} {db_uri}" try: - proc = subprocess.Popen(command_for_dumping, shell=True) # , env={ - # 'PGPASSWORD': DB_PASSWORD - # }) - proc.wait() + subprocess.check_output(command_for_dumping, shell=True) click.echo(f"db dump successful: saved to {dump_filename}") except Exception as e: @@ -125,10 +122,7 @@ def restore(file: str): click.echo(f"Restoring {db_host_and_db_name} database from file {file}") command_for_restoring = f"pg_restore -d {db_uri} {file}" try: - proc = subprocess.Popen(command_for_restoring, shell=True) # , env={ - # 'PGPASSWORD': DB_PASSWORD - # }) - proc.wait() + subprocess.check_output(command_for_restoring, shell=True) click.echo("db restore successful") except Exception as e: diff --git a/flexmeasures/data/models/planning/charging_station.py b/flexmeasures/data/models/planning/charging_station.py index 40a321c63..a53df9610 100644 --- a/flexmeasures/data/models/planning/charging_station.py +++ b/flexmeasures/data/models/planning/charging_station.py @@ -110,9 +110,10 @@ def schedule_charging_station( ] if inflexible_device_sensors is None: 
inflexible_device_sensors = [] - device_constraints = [initialize_df(columns, start, end, resolution)] * ( - 1 + len(inflexible_device_sensors) - ) + device_constraints = [ + initialize_df(columns, start, end, resolution) + for i in range(1 + len(inflexible_device_sensors)) + ] for i, inflexible_sensor in enumerate(inflexible_device_sensors): device_constraints[i + 1]["derivative equals"] = get_power_values( query_window=(start, end), diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index e16af9488..e66dbf33a 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -46,8 +46,8 @@ def device_scheduler( # noqa C901 derivative max: maximum flow (e.g. in MW or boxes/h) derivative min: minimum flow derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max) - derivative down efficiency: ratio of downwards flows (flow into EMS : flow out of device) - derivative up efficiency: ratio of upwards flows (flow into device : flow out of EMS) + derivative down efficiency: conversion efficiency of flow out of a device (flow out : stock decrease) + derivative up efficiency: conversion efficiency of flow into a device (stock increase : flow in) EMS constraints are on an EMS level. 
Handled constraints (listed by column name): derivative max: maximum flow derivative min: minimum flow @@ -228,10 +228,12 @@ def device_derivative_up_efficiency(m, d, j): # Add constraints as a tuple of (lower bound, value, upper bound) def device_bounds(m, d, j): + """Apply efficiencies to conversion from flow to stock change and vice versa.""" return ( m.device_min[d, j], sum( - m.device_power_down[d, k] + m.device_power_up[d, k] + m.device_power_down[d, k] / m.device_derivative_down_efficiency[d, k] + + m.device_power_up[d, k] * m.device_derivative_up_efficiency[d, k] for k in range(0, j + 1) ), m.device_max[d, j], @@ -275,12 +277,10 @@ def ems_flow_commitment_equalities(m, j): ) def device_derivative_equalities(m, d, j): - """Couple device flows to EMS flows per device, applying efficiencies.""" + """Couple device flows to EMS flows per device.""" return ( 0, - m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] - + m.device_power_down[d, j] * m.device_derivative_down_efficiency[d, j] - - m.ems_power[d, j], + m.device_power_up[d, j] + m.device_power_down[d, j] - m.ems_power[d, j], 0, ) @@ -335,7 +335,7 @@ def cost_function(m): ) # model.pprint() + # model.display() # print(results.solver.termination_condition) # print(planned_costs) - # model.display() return planned_power_per_device, planned_costs, results diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index 15396e66b..b4cc4a7b1 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -90,7 +90,13 @@ def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): soc_max=soc_max, roundtrip_efficiency=roundtrip_efficiency, ) - soc_schedule = integrate_time_series(schedule, soc_at_start, decimal_precision=6) + soc_schedule = integrate_time_series( + schedule, + soc_at_start, + up_efficiency=roundtrip_efficiency ** 0.5, + 
down_efficiency=roundtrip_efficiency ** 0.5, + decimal_precision=6, + ) with pd.option_context("display.max_rows", None, "display.max_columns", 3): print(soc_schedule) diff --git a/flexmeasures/data/models/user.py b/flexmeasures/data/models/user.py index ebbbaaa40..3487638cf 100644 --- a/flexmeasures/data/models/user.py +++ b/flexmeasures/data/models/user.py @@ -67,7 +67,7 @@ class Account(db.Model, AuthModelMixin): ) def __repr__(self): - return "" % (self.name, self.id) def __acl__(self): """ diff --git a/flexmeasures/data/schemas/__init__.py b/flexmeasures/data/schemas/__init__.py index 0ae18d982..96cd296f4 100644 --- a/flexmeasures/data/schemas/__init__.py +++ b/flexmeasures/data/schemas/__init__.py @@ -1,3 +1,4 @@ +from .assets import LatitudeField, LongitudeField # noqa F401 from .generic_assets import GenericAssetIdField as AssetIdField # noqa F401 from .sensors import SensorIdField # noqa F401 from .times import AwareDateTimeField, DurationField # noqa F401 diff --git a/flexmeasures/data/schemas/assets.py b/flexmeasures/data/schemas/assets.py index db4bd6e31..79c29a2f7 100644 --- a/flexmeasures/data/schemas/assets.py +++ b/flexmeasures/data/schemas/assets.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from marshmallow import validates, ValidationError, validates_schema, fields, validate from flexmeasures.data import ma @@ -5,6 +7,87 @@ from flexmeasures.data.models.time_series import Sensor from flexmeasures.data.models.user import User from flexmeasures.data.schemas.sensors import SensorSchemaMixin +from flexmeasures.data.schemas.utils import FMValidationError, MarshmallowClickMixin + + +class LatitudeLongitudeValidator(validate.Validator): + """Validator which succeeds if the value passed has at most 7 decimal places.""" + + def __init__(self, *, error: str | None = None): + self.error = error + + def __call__(self, value): + if not round(value, 7) == value: + raise FMValidationError( + "Latitudes and longitudes are limited to 7 decimal places." 
+ ) + return value + + +class LatitudeValidator(validate.Validator): + """Validator which succeeds if the value passed is in the range [-90, 90].""" + + def __init__(self, *, error: str | None = None, allow_none: bool = False): + self.error = error + self.allow_none = allow_none + + def __call__(self, value): + if self.allow_none and value is None: + return + if value < -90: + raise FMValidationError( + f"Latitude {value} exceeds the minimum latitude of -90 degrees." + ) + if value > 90: + raise ValidationError( + f"Latitude {value} exceeds the maximum latitude of 90 degrees." + ) + return value + + +class LongitudeValidator(validate.Validator): + """Validator which succeeds if the value passed is in the range [-180, 180].""" + + def __init__(self, *, error: str | None = None, allow_none: bool = False): + self.error = error + self.allow_none = allow_none + + def __call__(self, value): + if self.allow_none and value is None: + return + if value < -180: + raise FMValidationError( + f"Longitude {value} exceeds the minimum longitude of -180 degrees." + ) + if value > 180: + raise ValidationError( + f"Longitude {value} exceeds the maximum longitude of 180 degrees." + ) + return value + + +class LatitudeField(MarshmallowClickMixin, fields.Float): + """Field that deserializes to a latitude float with max 7 decimal places.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Insert validation into self.validators so that multiple errors can be stored. + self.validators.insert(0, LatitudeLongitudeValidator()) + self.validators.insert( + 0, LatitudeValidator(allow_none=kwargs.get("allow_none", False)) + ) + + +class LongitudeField(MarshmallowClickMixin, fields.Float): + """Field that deserializes to a longitude float with max 7 decimal places.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Insert validation into self.validators so that multiple errors can be stored. 
+ self.validators.insert(0, LatitudeLongitudeValidator()) + self.validators.insert( + 0, LongitudeValidator(allow_none=kwargs.get("allow_none", False)) + ) class AssetSchema(SensorSchemaMixin, ma.SQLAlchemySchema): @@ -63,8 +146,8 @@ def validate_soc_constraints(self, data, **kwargs): soc_in_mwh = ma.auto_field() soc_datetime = ma.auto_field() soc_udi_event_id = ma.auto_field() - latitude = fields.Float(required=True, validate=validate.Range(min=-90, max=90)) - longitude = fields.Float(required=True, validate=validate.Range(min=-180, max=180)) + latitude = LatitudeField(allow_none=True) + longitude = LongitudeField(allow_none=True) asset_type_name = ma.auto_field(required=True) owner_id = ma.auto_field(required=True) market_id = ma.auto_field(required=True) diff --git a/flexmeasures/data/schemas/generic_assets.py b/flexmeasures/data/schemas/generic_assets.py index 14d7d81cd..c9e65882b 100644 --- a/flexmeasures/data/schemas/generic_assets.py +++ b/flexmeasures/data/schemas/generic_assets.py @@ -7,6 +7,7 @@ from flexmeasures.data import ma from flexmeasures.data.models.user import Account from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType +from flexmeasures.data.schemas import LatitudeField, LongitudeField from flexmeasures.data.schemas.utils import ( FMValidationError, MarshmallowClickMixin, @@ -35,8 +36,8 @@ class GenericAssetSchema(ma.SQLAlchemySchema): id = ma.auto_field(dump_only=True) name = fields.Str(required=True) account_id = ma.auto_field() - latitude = ma.auto_field() - longitude = ma.auto_field() + latitude = LatitudeField(allow_none=True) + longitude = LongitudeField(allow_none=True) generic_asset_type_id = fields.Integer(required=True) attributes = JSON(required=False) @@ -77,34 +78,6 @@ def validate_account(self, account_id: int): "User is not allowed to create assets for this account." 
) - @validates("latitude") - def validate_latitude(self, latitude: Optional[float]): - """Validate optional latitude.""" - if latitude is None: - return - if latitude < -90: - raise ValidationError( - f"Latitude {latitude} exceeds the minimum latitude of -90 degrees." - ) - if latitude > 90: - raise ValidationError( - f"Latitude {latitude} exceeds the maximum latitude of 90 degrees." - ) - - @validates("longitude") - def validate_longitude(self, longitude: Optional[float]): - """Validate optional longitude.""" - if longitude is None: - return - if longitude < -180: - raise ValidationError( - f"Longitude {longitude} exceeds the minimum longitude of -180 degrees." - ) - if longitude > 180: - raise ValidationError( - f"Longitude {longitude} exceeds the maximum longitude of 180 degrees." - ) - class GenericAssetTypeSchema(ma.SQLAlchemySchema): """ diff --git a/flexmeasures/data/schemas/tests/test_latitude_longitudes.py b/flexmeasures/data/schemas/tests/test_latitude_longitudes.py new file mode 100644 index 000000000..dd695a56b --- /dev/null +++ b/flexmeasures/data/schemas/tests/test_latitude_longitudes.py @@ -0,0 +1,84 @@ +import pytest + +from flexmeasures.data.schemas.assets import LatitudeField, LongitudeField +from flexmeasures.data.schemas.utils import ValidationError + + +@pytest.mark.parametrize( + ("input", "exp_deserialization"), + [ + (0, 0), + (0.1234567, 0.1234567), + (-90, -90), + (90, 90), + ], +) +def test_latitude(input, exp_deserialization): + """Testing straightforward cases""" + lf = LatitudeField() + deser = lf.deserialize(input, None, None) + assert deser == exp_deserialization + assert lf.serialize("duration", {"duration": deser}) == round(input, 7) + + +@pytest.mark.parametrize( + ("input", "error_messages"), + [ + ("ninety", ["Not a valid number."]), + (90.01, ["Latitude 90.01 exceeds the maximum latitude of 90 degrees."]), + (0.12345678, ["Latitudes and longitudes are limited to 7 decimal places."]), + ( + -90.00000001, + [ + "Latitude 
-90.00000001 exceeds the minimum latitude of -90 degrees.", + "Latitudes and longitudes are limited to 7 decimal places.", + ], + ), + ], +) +def test_latitude_field_invalid(input, error_messages): + lf = LatitudeField() + with pytest.raises(ValidationError) as ve: + lf.deserialize(input, None, None) + assert error_messages == ve.value.messages + + +@pytest.mark.parametrize( + ("input", "exp_deserialization"), + [ + (0, 0), + (0.1234567, 0.1234567), + (-180, -180), + (180, 180), + ], +) +def test_longitude(input, exp_deserialization): + """Testing straightforward cases""" + lf = LongitudeField() + deser = lf.deserialize(input, None, None) + assert deser == exp_deserialization + assert lf.serialize("duration", {"duration": deser}) == round(input, 7) + + +@pytest.mark.parametrize( + ("input", "error_messages"), + [ + ("one-hundred-and-eighty", ["Not a valid number."]), + ( + -180.01, + ["Longitude -180.01 exceeds the minimum longitude of -180 degrees."], + ), + ( + -180.00000001, + [ + "Longitude -180.00000001 exceeds the minimum longitude of -180 degrees.", + "Latitudes and longitudes are limited to 7 decimal places.", + ], + ), + ], +) +def test_longitude_field_invalid(input, error_messages): + lf = LongitudeField() + with pytest.raises(ValidationError) as ve: + lf.deserialize(input, None, None) + assert error_messages == ve.value.messages diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index b56141e4e..521e4e728 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -1,7 +1,9 @@ +from __future__ import annotations + from typing import List, Optional, Union from flask import current_app -from timely_beliefs import BeliefsDataFrame +from timely_beliefs import BeliefsDataFrame, BeliefsSeries from flexmeasures.data import db from flexmeasures.data.models.data_sources import DataSource @@ -9,7 +11,7 @@ from flexmeasures.data.services.time_series import drop_unchanged_beliefs -def save_to_session(objects: List[db.Model], 
overwrite: bool = False): +def save_to_session(objects: list[db.Model], overwrite: bool = False): """Utility function to save to database, either efficiently with a bulk save, or inefficiently with a merge save.""" if not overwrite: db.session.bulk_save_objects(objects) @@ -20,8 +22,8 @@ def save_to_session(objects: List[db.Model], overwrite: bool = False): def get_data_source( data_source_name: str, - data_source_model: Optional[str] = None, - data_source_version: Optional[str] = None, + data_source_model: str | None = None, + data_source_version: str | None = None, data_source_type: str = "script", ) -> DataSource: """Make sure we have a data source. Create one if it doesn't exist, and add to session. @@ -50,7 +52,7 @@ def get_data_source( def save_to_db( - data: Union[BeliefsDataFrame, List[BeliefsDataFrame]], + data: BeliefsDataFrame | BeliefsSeries | list[BeliefsDataFrame | BeliefsSeries], bulk_save_objects: bool = False, save_changed_beliefs_only: bool = True, ) -> str: @@ -101,6 +103,10 @@ def save_to_db( # Nothing to save continue + # Convert series to frame if needed + if isinstance(timed_values, BeliefsSeries): + timed_values = timed_values.rename("event_value").to_frame() + len_before = len(timed_values) if save_changed_beliefs_only: diff --git a/flexmeasures/ui/crud/assets.py b/flexmeasures/ui/crud/assets.py index 52072f72a..e9897c9e5 100644 --- a/flexmeasures/ui/crud/assets.py +++ b/flexmeasures/ui/crud/assets.py @@ -39,12 +39,12 @@ class AssetForm(FlaskForm): name = StringField("Name") latitude = DecimalField( "Latitude", - places=4, + places=None, render_kw={"placeholder": "--Click the map or enter a latitude--"}, ) longitude = DecimalField( "Longitude", - places=4, + places=None, render_kw={"placeholder": "--Click the map or enter a longitude--"}, ) attributes = StringField("Other attributes (JSON)", default="{}") @@ -81,7 +81,13 @@ def process_api_validation_errors(self, api_response: dict): continue for field in list(self._fields.keys()): if field 
in list(api_response[error_header].keys()): - self._fields[field].errors.append(api_response[error_header][field]) + field_errors = api_response[error_header][field] + if isinstance(field_errors, list): + self._fields[field].errors += api_response[error_header][field] + else: + self._fields[field].errors.append( + api_response[error_header][field] + ) class NewAssetForm(AssetForm): diff --git a/flexmeasures/utils/calculations.py b/flexmeasures/utils/calculations.py index fef972f82..84cb8dd79 100644 --- a/flexmeasures/utils/calculations.py +++ b/flexmeasures/utils/calculations.py @@ -1,6 +1,7 @@ """ Calculations """ +from __future__ import annotations + from datetime import timedelta -from typing import Optional import numpy as np import pandas as pd @@ -37,11 +38,15 @@ def drop_nan_rows(a, b): def integrate_time_series( - s: pd.Series, s0: float, decimal_precision: Optional[int] = None + series: pd.Series, + initial_stock: float, + up_efficiency: float | pd.Series = 1, + down_efficiency: float | pd.Series = 1, + decimal_precision: int | None = None, ) -> pd.Series: """Integrate time series of length n and closed="left" (representing a flow) to a time series of length n+1 and closed="both" (representing a stock), - given a starting stock s0. + given an initial stock (i.e. the constant of integration). The unit of time is hours: i.e. the stock unit is flow unit times hours (e.g. a flow in kW becomes a stock in kWh). Optionally, set a decimal precision to round off the results (useful for tests failing over machine precision). 
@@ -63,12 +68,24 @@ def integrate_time_series( 2001-01-01 07:00:00 15.0 dtype: float64 """ - resolution = pd.to_timedelta(s.index.freq) + resolution = pd.to_timedelta(series.index.freq) + stock_change = pd.Series(data=np.NaN, index=series.index) + stock_change.loc[series > 0] = series[series > 0] * ( + up_efficiency[series > 0] + if isinstance(up_efficiency, pd.Series) + else up_efficiency + ) + stock_change.loc[series <= 0] = series[series <= 0] / ( + down_efficiency[series <= 0] + if isinstance(down_efficiency, pd.Series) + else down_efficiency + ) int_s = pd.concat( [ - pd.Series(s0, index=pd.date_range(s.index[0], periods=1)), - s.shift(1, freq=resolution).cumsum() * (resolution / timedelta(hours=1)) - + s0, + pd.Series(initial_stock, index=pd.date_range(series.index[0], periods=1)), + stock_change.shift(1, freq=resolution).cumsum() + * (resolution / timedelta(hours=1)) + + initial_stock, ] ) if decimal_precision is not None: diff --git a/flexmeasures/utils/coding_utils.py b/flexmeasures/utils/coding_utils.py index 4c75c9c02..4b4d763f8 100644 --- a/flexmeasures/utils/coding_utils.py +++ b/flexmeasures/utils/coding_utils.py @@ -1,4 +1,5 @@ import functools +import time def make_registering_decorator(foreign_decorator): @@ -114,3 +115,17 @@ def real_decorator(decoratee): def sort_dict(unsorted_dict: dict) -> dict: sorted_dict = dict(sorted(unsorted_dict.items(), key=lambda item: item[0])) return sorted_dict + + +def timeit(func): + """Decorator for printing the time it took to execute the decorated function.""" + + @functools.wraps(func) + def new_func(*args, **kwargs): + start_time = time.time() + result = func(*args, **kwargs) + elapsed_time = time.time() - start_time + print(f"{func.__name__} finished in {int(elapsed_time * 1_000)} ms") + return result + + return new_func From 35618eedd15381f1e1731862d581bd2bbe6951e2 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 19:20:51 +0100 Subject: [PATCH 22/24] black and flake8 Signed-off-by: F.N. 
Claessen --- flexmeasures/api/v1_3/tests/test_api_v1_3.py | 4 +++- flexmeasures/api/v3_0/tests/test_sensor_schedules.py | 4 +++- flexmeasures/data/models/planning/tests/test_solver.py | 4 ++-- flexmeasures/data/schemas/generic_assets.py | 1 - flexmeasures/data/utils.py | 2 -- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/flexmeasures/api/v1_3/tests/test_api_v1_3.py b/flexmeasures/api/v1_3/tests/test_api_v1_3.py index b771cea01..1835e3f84 100644 --- a/flexmeasures/api/v1_3/tests/test_api_v1_3.py +++ b/flexmeasures/api/v1_3/tests/test_api_v1_3.py @@ -113,7 +113,9 @@ def test_post_udi_event_and_get_device_message( if "targets" in message: start_soc = message["value"] / 1000 # in MWh soc_schedule = integrate_time_series( - consumption_schedule, start_soc, decimal_precision=6, + consumption_schedule, + start_soc, + decimal_precision=6, ) print(consumption_schedule) print(soc_schedule) diff --git a/flexmeasures/api/v3_0/tests/test_sensor_schedules.py b/flexmeasures/api/v3_0/tests/test_sensor_schedules.py index 793b17abd..d133671c3 100644 --- a/flexmeasures/api/v3_0/tests/test_sensor_schedules.py +++ b/flexmeasures/api/v3_0/tests/test_sensor_schedules.py @@ -91,7 +91,9 @@ def test_trigger_and_get_schedule( if "targets" in message: start_soc = message["soc-at-start"] / 1000 # in MWh soc_schedule = integrate_time_series( - consumption_schedule, start_soc, decimal_precision=6, + consumption_schedule, + start_soc, + decimal_precision=6, ) print(consumption_schedule) print(soc_schedule) diff --git a/flexmeasures/data/models/planning/tests/test_solver.py b/flexmeasures/data/models/planning/tests/test_solver.py index b4cc4a7b1..0295079eb 100644 --- a/flexmeasures/data/models/planning/tests/test_solver.py +++ b/flexmeasures/data/models/planning/tests/test_solver.py @@ -93,8 +93,8 @@ def test_battery_solver_day_2(add_battery_assets, roundtrip_efficiency: float): soc_schedule = integrate_time_series( schedule, soc_at_start, - up_efficiency=roundtrip_efficiency ** 
0.5, - down_efficiency=roundtrip_efficiency ** 0.5, + up_efficiency=roundtrip_efficiency**0.5, + down_efficiency=roundtrip_efficiency**0.5, decimal_precision=6, ) diff --git a/flexmeasures/data/schemas/generic_assets.py b/flexmeasures/data/schemas/generic_assets.py index c9e65882b..b7aa0b86a 100644 --- a/flexmeasures/data/schemas/generic_assets.py +++ b/flexmeasures/data/schemas/generic_assets.py @@ -1,4 +1,3 @@ -from typing import Optional import json from marshmallow import validates, validates_schema, ValidationError, fields diff --git a/flexmeasures/data/utils.py b/flexmeasures/data/utils.py index 521e4e728..90a83f9dc 100644 --- a/flexmeasures/data/utils.py +++ b/flexmeasures/data/utils.py @@ -1,7 +1,5 @@ from __future__ import annotations -from typing import List, Optional, Union - from flask import current_app from timely_beliefs import BeliefsDataFrame, BeliefsSeries From 6e1510af0979d3d646363cc2b460c349e88abfc3 Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 19:25:51 +0100 Subject: [PATCH 23/24] A few more reverts Signed-off-by: F.N. 
Claessen --- documentation/changelog.rst | 1 + flexmeasures/data/models/planning/solver.py | 5 +---- ...est_latitude_longitudes.py => test_latitude_longitude.py} | 0 3 files changed, 2 insertions(+), 4 deletions(-) rename flexmeasures/data/schemas/tests/{test_latitude_longitudes.py => test_latitude_longitude.py} (100%) diff --git a/documentation/changelog.rst b/documentation/changelog.rst index d28a3899f..2c8573856 100644 --- a/documentation/changelog.rst +++ b/documentation/changelog.rst @@ -38,6 +38,7 @@ Bugfixes * Fix scheduler for Charge Points when taking into account inflexible devices [see `PR #517 `_] * Prevent rounding asset lat/long positions to 4 decimal places when editing an asset in the UI [see `PR #522 `_] + v0.11.2 | September 6, 2022 ============================ diff --git a/flexmeasures/data/models/planning/solver.py b/flexmeasures/data/models/planning/solver.py index e66dbf33a..42c8acf29 100644 --- a/flexmeasures/data/models/planning/solver.py +++ b/flexmeasures/data/models/planning/solver.py @@ -321,10 +321,7 @@ def cost_function(m): planned_costs = value(model.costs) planned_power_per_device = [] for d in model.d: - planned_device_power = [ - model.device_power_down[d, j].value + model.device_power_up[d, j].value - for j in model.j - ] + planned_device_power = [model.ems_power[d, j].value for j in model.j] planned_power_per_device.append( pd.Series( index=pd.date_range( diff --git a/flexmeasures/data/schemas/tests/test_latitude_longitudes.py b/flexmeasures/data/schemas/tests/test_latitude_longitude.py similarity index 100% rename from flexmeasures/data/schemas/tests/test_latitude_longitudes.py rename to flexmeasures/data/schemas/tests/test_latitude_longitude.py From 100f05c07f9e1cff212defe58d2feebbf3005dcb Mon Sep 17 00:00:00 2001 From: "F.N. Claessen" Date: Thu, 10 Nov 2022 20:02:31 +0100 Subject: [PATCH 24/24] Fix typos Signed-off-by: F.N. 
Claessen --- documentation/cli/change_log.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/cli/change_log.rst b/documentation/cli/change_log.rst index 811ba382c..7c7dab958 100644 --- a/documentation/cli/change_log.rst +++ b/documentation/cli/change_log.rst @@ -8,7 +8,7 @@ since v0.12.0 | November XX, 2022 ================================= * Add ``--resolution``, ``--timezone`` and ``--to-file`` options to ``flexmeasures show beliefs``, to show beliefs data in a custom resolution and/or timezone, and also to save shown beliefs data to a CSV file. -* Add options to ``flexmeasures add beliefs`` to 1) read CSV data with timezone naive datetimes (use ``--timezone``to localize the data), 2) read CSV data with datetime/timedelta units (use ``--unit datetime`` or ``--unit timedelta`, 3) remove rows with NaN values, and 4) add filter read-in data by matching values in specific columns (use ``--filter-column`` and ``--filter-value`` together). +* Add options to ``flexmeasures add beliefs`` to 1) read CSV data with timezone naive datetimes (use ``--timezone`` to localize the data), 2) read CSV data with datetime/timedelta units (use ``--unit datetime`` or ``--unit timedelta``), 3) remove rows with NaN values, and 4) add filter to read-in data by matching values in specific columns (use ``--filter-column`` and ``--filter-value`` together). * Fix ``flexmeasures db-ops dump`` and ``flexmeasures db-ops restore`` incorrectly reporting a success when `pg_dump` and `pg_restore` are not installed. since v0.11.0 | August 28, 2022