API endpoint for scheduling asset #1065
base: main
Changes from 57 commits
@@ -1,21 +1,34 @@
from __future__ import annotations

from datetime import datetime, timedelta
import json

from flask import current_app
from flask_classful import FlaskView, route
from flask_security import auth_required
from flask_json import as_json
from marshmallow import fields
from marshmallow import fields, ValidationError
from webargs.flaskparser import use_kwargs, use_args
from sqlalchemy import select, delete

from flexmeasures.auth.decorators import permission_required_for_context
from flexmeasures.data import db
from flexmeasures.data.models.user import Account
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.schemas import AwareDateTimeField
from flexmeasures.data.schemas.generic_assets import GenericAssetSchema as AssetSchema
from flexmeasures.api.common.schemas.generic_assets import AssetIdField
from flexmeasures.data.schemas.generic_assets import (
    GenericAssetSchema as AssetSchema,
    GenericAssetIdField as AssetIdField,
)
from flexmeasures.data.schemas.scheduling import AssetTriggerSchema
from flexmeasures.data.schemas.times import AwareDateTimeField
from flexmeasures.data.services.scheduling import (
    create_sequential_scheduling_job,
)
from flexmeasures.api.common.schemas.users import AccountIdField
from flexmeasures.api.common.responses import (
    invalid_flex_config,
    request_processed,
)
from flexmeasures.utils.coding_utils import flatten_unique
from flexmeasures.ui.utils.view_utils import set_session_variables

@@ -146,7 +159,9 @@ def post(self, asset_data: dict):
return asset_schema.dump(asset), 201

@route("/<id>", methods=["GET"])
@use_kwargs({"asset": AssetIdField(data_key="id")}, location="path")
@use_kwargs(
    {"asset": AssetIdField(data_key="id", status_if_not_found=404)}, location="path"
)
@permission_required_for_context("read", ctx_arg_name="asset")
@as_json
def fetch_one(self, id, asset):

@@ -182,7 +197,10 @@ def fetch_one(self, id, asset):

@route("/<id>", methods=["PATCH"])
@use_args(partial_asset_schema)
@use_kwargs({"db_asset": AssetIdField(data_key="id")}, location="path")
@use_kwargs(
    {"db_asset": AssetIdField(data_key="id", status_if_not_found=404)},
    location="path",
)
@permission_required_for_context("update", ctx_arg_name="db_asset")
@as_json
def patch(self, asset_data: dict, id: int, db_asset: GenericAsset):

@@ -238,7 +256,9 @@ def patch(self, asset_data: dict, id: int, db_asset: GenericAsset):
return asset_schema.dump(db_asset), 200

@route("/<id>", methods=["DELETE"])
@use_kwargs({"asset": AssetIdField(data_key="id")}, location="path")
@use_kwargs(
    {"asset": AssetIdField(data_key="id", status_if_not_found=404)}, location="path"
)
@permission_required_for_context("delete", ctx_arg_name="asset")
@as_json
def delete(self, id: int, asset: GenericAsset):

@@ -265,7 +285,7 @@ def delete(self, id: int, asset: GenericAsset):

@route("/<id>/chart", strict_slashes=False)  # strict on next version? see #1014
@use_kwargs(
    {"asset": AssetIdField(data_key="id")},
    {"asset": AssetIdField(data_key="id", status_if_not_found=404)},
    location="path",
)
@use_kwargs(
@@ -295,7 +315,7 @@ def get_chart(self, id: int, asset: GenericAsset, **kwargs):
    "/<id>/chart_data", strict_slashes=False
)  # strict on next version? see #1014
@use_kwargs(
    {"asset": AssetIdField(data_key="id")},
    {"asset": AssetIdField(data_key="id", status_if_not_found=404)},
    location="path",
)
@use_kwargs(
@@ -318,3 +338,173 @@ def get_chart_data(self, id: int, asset: GenericAsset, **kwargs):
"""
sensors = flatten_unique(asset.sensors_to_show)
return asset.search_beliefs(sensors=sensors, as_json=True, **kwargs)

@route("/<id>/schedules/trigger", methods=["POST"])
@use_args(AssetTriggerSchema(), location="args_and_json", as_kwargs=True)
# Simplification of checking for create-children access on each of the flexible sensors,
# which assumes each of the flexible sensors belongs to the given asset.
@permission_required_for_context("create-children", ctx_arg_name="asset")
def trigger_schedule(
    self,
    asset: GenericAsset,
    start_of_schedule: datetime,
    duration: timedelta,
    belief_time: datetime | None = None,
    flex_model: dict | None = None,
    flex_context: dict | None = None,
    **kwargs,
):
""" | ||
Trigger FlexMeasures to create a schedule for a collection of flexible devices. | ||
|
||
.. :quickref: Schedule; Trigger scheduling job for multiple devices | ||
|
||
Trigger FlexMeasures to create a schedule for this asset. | ||
The assumption is that this is a flexible asset containing multiple power sensors. | ||
|
||
In this request, you can describe: | ||
|
||
- the schedule's main features (when does it start, what unit should it report, prior to what time can we assume knowledge) | ||
- the flexibility models for the asset's relevant sensors (state and constraint variables, e.g. current state of charge of a battery, or connection capacity) | ||
- the flexibility context which the asset operates in (other sensors under the same EMS which are relevant, e.g. prices) | ||
|
||
For details on flexibility model and context, see :ref:`describing_flexibility`. | ||
Below, we'll also list some examples. | ||
|
||
.. note:: This endpoint support scheduling an EMS with multiple flexible sensors at once, | ||
but internally, it does so sequentially | ||
(considering already scheduled sensors as inflexible). | ||
|
||
The length of the schedule can be set explicitly through the 'duration' field. | ||
Otherwise, it is set by the config setting :ref:`planning_horizon_config`, which defaults to 48 hours. | ||
If the flex-model contains targets that lie beyond the planning horizon, the length of the schedule is extended to accommodate them. | ||
Finally, the schedule length is limited by :ref:`max_planning_horizon_config`, which defaults to 2520 steps of each sensor's resolution. | ||
Targets that exceed the max planning horizon are not accepted. | ||
|
||
The appropriate algorithm is chosen by FlexMeasures (based on asset type). | ||
It's also possible to use custom schedulers and custom flexibility models, see :ref:`plugin_customization`. | ||
|
||
If you have ideas for algorithms that should be part of FlexMeasures, let us know: https://flexmeasures.io/get-in-touch/ | ||
|
||
**Example request A**

This message triggers a schedule for a storage asset, starting at 10.00am, at which the state of charge (soc) is 12.1 kWh.

.. code-block:: json

    {
        "start": "2015-06-02T10:00:00+00:00",
        "flex-model": [
            {
                "sensor": 931,
                "soc-at-start": 12.1,
                "soc-unit": "kWh"
            }
        ]
    }

Review comment: This example should be updated with the new schema changes (sensor_flex_model field, etc).
Reply: That

**Example request B**

This message triggers a 24-hour schedule for a storage asset, starting at 10.00am,
at which the state of charge (soc) is 12.1 kWh, with a target state of charge of 25 kWh at 4.00pm.

The charging efficiency is constant (120%) and the discharging efficiency is determined by the contents of sensor
with id 98. If just the ``roundtrip-efficiency`` is known, it can be described with its own field.
The global minimum and maximum soc are set to 10 and 25 kWh, respectively.
To guarantee a minimum SOC in the period prior, the sensor with ID 300 contains beliefs at 2.00pm and 3.00pm, for 15kWh and 20kWh, respectively.
Storage efficiency is set to 99.99%, denoting the state of charge left after each time step equal to the sensor's resolution.
Aggregate consumption (of all devices within this EMS) should be priced by sensor 9,
and aggregate production should be priced by sensor 10,
where the aggregate power flow in the EMS is described by the sum over sensors 13, 14 and 15
(plus the flexible sensor being optimized, of course).

The battery consumption power capacity is limited by sensor 42 and the production capacity is constant (30 kW).
Finally, the site consumption capacity is limited by sensor 32.

Note that, if forecasts for sensors 13, 14 and 15 are not available, a schedule cannot be computed.

.. code-block:: json

    {
        "start": "2015-06-02T10:00:00+00:00",
        "duration": "PT24H",
        "flex-model": [
            {
                "sensor": 931,
                "soc-at-start": 12.1,
                "soc-unit": "kWh",
                "soc-targets": [
                    {
                        "value": 25,
                        "datetime": "2015-06-02T16:00:00+00:00"
                    }
                ],
                "soc-minima": {"sensor": 300},
                "soc-min": 10,
                "soc-max": 25,
                "charging-efficiency": "120%",
                "discharging-efficiency": {"sensor": 98},
                "storage-efficiency": 0.9999,
                "power-capacity": "25kW",
                "consumption-capacity": {"sensor": 42},
                "production-capacity": "30 kW"
            }
        ],
        "flex-context": {
            "consumption-price-sensor": 9,
            "production-price-sensor": 10,
            "inflexible-device-sensors": [13, 14, 15],
            "site-power-capacity": "100kW",
            "site-production-capacity": "80kW",
            "site-consumption-capacity": {"sensor": 32}
        }
    }

**Example response**

Review comment: TODO

This message indicates that the scheduling request has been processed without any error.
A scheduling job has been created with some Universally Unique Identifier (UUID),
which will be picked up by a worker.
The given UUID may be used to obtain the resulting schedule: see /assets/<id>/schedules/<uuid>.

.. sourcecode:: json

    {
        "status": "PROCESSED",
        "schedule": "364bfd06-c1fa-430b-8d25-8f5a547651fb",
        "message": "Request has been processed."
    }

:reqheader Authorization: The authentication token
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
:status 200: PROCESSED
:status 400: INVALID_DATA
:status 401: UNAUTHORIZED
:status 403: INVALID_SENDER
:status 405: INVALID_METHOD
:status 422: UNPROCESSABLE_ENTITY
"""
end_of_schedule = start_of_schedule + duration

scheduler_kwargs = dict(
    start=start_of_schedule,
    end=end_of_schedule,
    belief_time=belief_time,  # server time if no prior time was sent
    flex_model=flex_model,
    flex_context=flex_context,
)
try:
    jobs = create_sequential_scheduling_job(
        asset_or_sensor=asset, enqueue=True, **scheduler_kwargs
    )
except ValidationError as err:
    return invalid_flex_config(err.messages)
except ValueError as err:
    return invalid_flex_config(str(err))

# todo: make a 'done job' and pass that job's ID here

Review comment: Technically, I guess I now implemented this todo, by putting the last job ID (that of the 'done job') in the response. However, I recall we discussed returning a full list of job IDs, so that the API user can use the existing endpoint to get each schedule. For example:

    {
        "schedules": [
            {
                "sensor": 1,
                "schedule": "<uuid>"
            },
            {
                "sensor": 2,
                "schedule": "<uuid>"
            }
        ]
    }

Reply: True, returning a list of schedules would be the way to go. That way, we can reuse the endpoint to get the schedules.

response = dict(schedule=jobs[-1].id)
d, s = request_processed()
return dict(**response, **d), s
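
To make the review suggestion above a bit more concrete, here is a rough, hypothetical sketch (not part of this PR) of how a list-of-schedules response could be built from the returned jobs. It assumes the scheduling jobs are RQ jobs and that each job's enqueued kwargs include the sensor it schedules for; neither detail is confirmed by this diff.

    from rq.job import Job  # assumption: the scheduling jobs are RQ jobs


    def build_schedules_response(jobs: list[Job]) -> dict:
        # Hypothetical helper: map every scheduling job to the sensor it was
        # enqueued for. The "sensor" kwarg is an assumption about how the jobs
        # are created, not something shown in this diff.
        return dict(
            schedules=[
                {"sensor": job.kwargs["sensor"].id, "schedule": job.id} for job in jobs
            ]
        )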
Review comment: The trigger can also deal with inflexible devices, so this is incomplete.
Reply: The first part is true. However, no schedules are created for the inflexible devices.
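
For anyone wanting to try the new endpoint, a minimal client-side sketch (not part of the PR) could look as follows. It uses Python's requests library; the host URL, asset ID, and token are placeholders, the payload copies Example request A from the docstring, and the follow-up GET assumes the /assets/<id>/schedules/<uuid> endpoint mentioned there.

    import requests

    # Placeholders: point these at your own FlexMeasures instance.
    BASE_URL = "https://company.flexmeasures.io/api/v3_0"
    HEADERS = {"Authorization": "<auth-token>", "Content-Type": "application/json"}
    ASSET_ID = 1

    # Trigger a schedule for the asset, reusing Example request A from the docstring.
    payload = {
        "start": "2015-06-02T10:00:00+00:00",
        "flex-model": [
            {"sensor": 931, "soc-at-start": 12.1, "soc-unit": "kWh"},
        ],
    }
    resp = requests.post(
        f"{BASE_URL}/assets/{ASSET_ID}/schedules/trigger", json=payload, headers=HEADERS
    )
    resp.raise_for_status()
    job_uuid = resp.json()["schedule"]  # UUID of the scheduling job, per the example response

    # The docstring points to /assets/<id>/schedules/<uuid> for retrieving the result
    # (endpoint shape assumed from the docstring, not verified here).
    schedule = requests.get(
        f"{BASE_URL}/assets/{ASSET_ID}/schedules/{job_uuid}", headers=HEADERS
    )
    print(schedule.json())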