edit2
ShahlaHuseynova 2 years ago
parent aab3f9541f
commit 3edb315938

@@ -0,0 +1,3 @@
{
"CurrentProjectSetting": null
}

@@ -0,0 +1,11 @@
{
"ExpandedNodes": [
"",
"\\pyrecoy",
"\\pyrecoy\\pyrecoy",
"\\pyrecoy\\pyrecoy\\data",
"\\pyrecoy\\pyrecoy\\data\\tax_tariffs"
],
"SelectedNode": "\\pyrecoy\\pyrecoy\\prices.py",
"PreviewInSolutionExplorer": false
}

Binary file not shown.

@@ -0,0 +1,100 @@
{
"Version": 1,
"WorkspaceRootPath": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\",
"Documents": [
{
"AbsoluteMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\prices.py||{8B382828-6202-11D1-8870-0000F87579D2}",
"RelativeMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|solutionrelative:pyrecoy\\pyrecoy\\prices.py||{8B382828-6202-11D1-8870-0000F87579D2}"
},
{
"AbsoluteMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}",
"RelativeMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|solutionrelative:pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}"
},
{
"AbsoluteMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}",
"RelativeMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|solutionrelative:pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}"
},
{
"AbsoluteMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}",
"RelativeMoniker": "D:0:0:{A2FE74E1-B743-11D0-AE1A-00A0C90FFFC3}|\u003CMiscFiles\u003E|solutionrelative:pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json||{90A6B3A7-C1A3-4009-A288-E2FF89E96FA0}"
}
],
"DocumentGroupContainers": [
{
"Orientation": 0,
"VerticalTabListWidth": 256,
"DocumentGroups": [
{
"DockedWidth": 200,
"SelectedChildIndex": 6,
"Children": [
{
"$type": "Bookmark",
"Name": "ST:129:0:{1fc202d4-d401-403c-9834-5b218574bb67}"
},
{
"$type": "Bookmark",
"Name": "ST:128:0:{116d2292-e37d-41cd-a077-ebacac4c8cc4}"
},
{
"$type": "Bookmark",
"Name": "ST:130:0:{116d2292-e37d-41cd-a077-ebacac4c8cc4}"
},
{
"$type": "Document",
"DocumentIndex": 2,
"Title": "gas_eb.json",
"DocumentMoniker": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json",
"RelativeDocumentMoniker": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json",
"ToolTip": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json",
"RelativeToolTip": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_eb.json",
"ViewState": "AQIAAEMAAAAAAAAAAAAAAE8AAAADAAAA",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.001642|",
"WhenOpened": "2024-04-22T13:54:50.515Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 1,
"Title": "gas_horticulture_eb.json",
"DocumentMoniker": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json",
"RelativeDocumentMoniker": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json",
"ToolTip": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json",
"RelativeToolTip": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\gas_horticulture_eb.json",
"ViewState": "AQIAAAAAAAAAAAAAAAAAAAAAAABhAAAA",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.001642|",
"WhenOpened": "2024-04-22T13:54:38.231Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 3,
"Title": "electricity_eb.json",
"DocumentMoniker": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json",
"RelativeDocumentMoniker": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json",
"ToolTip": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json",
"RelativeToolTip": "pyrecoy\\pyrecoy\\data\\tax_tariffs\\electricity_eb.json",
"ViewState": "AQIAACgAAAAAAAAAAAAIwEEAAAABAAAA",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.001642|",
"WhenOpened": "2024-04-22T08:40:35.654Z",
"EditorCaption": ""
},
{
"$type": "Document",
"DocumentIndex": 0,
"Title": "prices.py",
"DocumentMoniker": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\prices.py",
"RelativeDocumentMoniker": "pyrecoy\\pyrecoy\\prices.py",
"ToolTip": "C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\asset-case-studies\\pyrecoy\\pyrecoy\\pyrecoy\\prices.py",
"RelativeToolTip": "pyrecoy\\pyrecoy\\prices.py",
"ViewState": "AQIAALEBAAAAAAAAAAASwMoBAAAAAAAA",
"Icon": "ae27a6b0-e345-4288-96df-5eaf394ee369.002457|",
"WhenOpened": "2024-04-16T14:15:11.652Z",
"EditorCaption": ""
}
]
}
]
}
]
}

Binary file not shown.

@@ -0,0 +1,6 @@
*.egg-info
.vscode
__pycache__
*.__pycache__
*.ipynb_checkpoints
*.pytest_cache

@@ -0,0 +1,25 @@
# The _pyrecoy_ Package
Modelling framework and tools for the modelling of flexible assets on energy markets.
## Getting started:
### Prerequisites
* It is recommended to set up your Python development environment according to [this Wiki page](https://gitlab.com/recoy-internal/pyrecoy-package/-/wikis/Recommended-Development-Environment)
* The _pyrecoy_ package is best used in a Jupyter Lab / Notebooks environment
* Environment variables:
* `ICE_USERNAME` and `ICE_PASSWORD` login credentials to https://www.ice.if5.com are required if you want to use TTF (gas) and ETS (CO2) prices in your model.
* `FORECAST_DATA_FOLDER`: Local path to the "Forecast Data" folder on the Sharepoint server. This is required if you want to use forecast/price data in your model, e.g. `C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/`
* Jupyter Lab extensions:
* `ipywidgets` and the [Plotly extension](https://plotly.com/python/getting-started/#jupyterlab-support-python-35) may be needed to view all graphs as intended.
### Installation
__For usage in a specific project only__
* Clone the repo to your project directory
__For global installation in your Python environment__
* Run `pip install git+ssh://git@gitlab.com/recoy-internal/pyrecoy-package.git`
* You should then be able to `import pyrecoy` in any Python script using your environment
## Usage:
...
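A minimal usage sketch (module paths and constructor arguments below are assumptions based on the code in this commit, not a documented API):

```python
from pyrecoy.assets import Battery  # module path assumed

bat = Battery(name="bat1", rated_power=2, rated_capacity=4, roundtrip_eff=0.9)
bat.set_freq("15T")       # align the asset to a 15-minute model resolution
load = bat.charge(2)      # realized load in MW (negative = charging)
print(load, bat.get_soc())
```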

@@ -0,0 +1,35 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "56a3f3f4-59a6-441f-96c7-f44845539991",
"metadata": {},
"outputs": [],
"source": [
"from pyrecoy.colors import *"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -0,0 +1 @@
from .database.Models.base import *

@@ -0,0 +1,789 @@
import warnings
from functools import partial, lru_cache
from numbers import Number
from itertools import count
import numpy as np
from numpy.polynomial import Polynomial
from scipy.optimize import minimize_scalar
from .converters import *
class Asset:
"""Generic class for producing/consuming assets. Specific asset classes can
inherit from this class.
Parameters:
-----------
max_power : int/float
Maximum asset power in MW electric
min_power : int/float
Minimum asset power in MW electric
Usage:
------
Use the set_load method to set the asset load in MW.
Convention is negative values for inputs (consumption) and positive
values for outputs (production).
"""
_freq_to_multiplier = {"H": 1, "15T": (1 / 4), "1T": (1 / 60)}
_ids = count(0)
def __init__(self, name, max_power, min_power):
if min_power > max_power:
raise ValueError("'min_power' can not be larger than 'max_power'.")
self.name = name
self.id = next(self._ids)
self.max_power = max_power
self.min_power = min_power
self.modes = {"max": max_power, "min": min_power}
def __repr__(self):
return f"{self.__class__.__name__}(self, max_power={self.max_power}, min_power={self.min_power})"
def set_load(self, load):
"""Set Asset load in MW.
Convention is negative values for consumption and positive values
for production. Subclasses may use a different convention where
that is more intuitive.
Returns the load that is set in MW.
"""
if load < self.min_power or load > self.max_power:
warnings.warn(
f"Chosen Asset load for {self.name} is out of range. "
f"Should be between {self.min_power} and {self.max_power}. "
f"Function will return boundary load level for now."
)
load = min(max(load, self.min_power), self.max_power)
return load
def set_mode(self, mode):
""" """
load = self.modes[mode]
return self.set_load(load)
def MW_to_MWh(self, MW):
"""Performs conversion from MW to MWh using the time_factor variable."""
return MW * self.time_factor
def MWh_to_MW(self, MWh):
"""Performs conversion from MWh to MW using the time_factor variable."""
return MWh / self.time_factor
def set_freq(self, freq):
"""
Function that aligns time frequency between Model and Asset.
Can be '1T', '15T' or 'H'
The time_factor variable is used in subclasses to perform MW to MWh conversions.
"""
self.freq = freq
self.time_factor = Asset._freq_to_multiplier[freq]
def set_financials(
self, capex, opex, devex, lifetime=None, depreciate=True, salvage_value=0
):
"""Set financial data of the asset."""
self.capex = capex
self.opex = opex
self.devex = devex
self.lifetime = lifetime
self.depreciate = depreciate
self.salvage_value = salvage_value
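# Illustrative example (assuming direct use of the generic class): loads
# outside [min_power, max_power] trigger a warning and are clipped.
#
#     asset = Asset("demo", max_power=10, min_power=-5)
#     asset.set_load(12)   # warns and returns the clipped value 10
#     asset.set_load(-7)   # warns and returns the clipped value -5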
class Eboiler(Asset):
"""Subclass for an E-boiler."""
def __init__(self, name, max_power, min_power=0, efficiency=0.99):
super().__init__(name, min_power=-max_power, max_power=-min_power)
self.efficiency = efficiency
self.max_thermal_output = max_power * efficiency
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, max_power={self.max_power}, "
f"min_power={self.min_power}, efficiency={self.efficiency})"
)
def set_load(self, load):
"""Set load in MWe, returns (load, heat_output) in MWe and MWth
Convention is negative numbers for consumption.
Passing a positive value raises a ValueError.
"""
if load > 0:
raise ValueError(
f"Eboiler.set_load() only accepts negative numbers by convention. "
f"{load} was inserted."
)
load = super().set_load(load)
heat_output = -load * self.efficiency
return (load, heat_output)
def set_heat_output(self, heat_output):
"""Set heat output in MWth, returns tuple (heat_output, eload) in MW"""
load = -heat_output / self.efficiency
load, heat_output = self.set_load(load)
return heat_output, load
class Heatpump(Asset):
"""Subclass for a Heatpump.
Use cop parameter to set fixed COP (float/int) or COP curve (func).
COP curve should take load in MWhe and return COP.
Parameters:
-----------
max_th_power : numeric
Maximum thermal output in MW (positive value)
cop_curve : numeric or list or function
3 ways to set the COP of the Heatpump:
(1) Fixed COP based on [numeric] value.
(2) Polynomial with coefficients based on [list] input.
Input coeficients in format [c0, c1, c2, ..., c(n)],
will generate Polynomial p(x) = c0 + c1*x + c2*x^2 ... cn*x^n,
where x = % thermal load (in % of thermal capacity) as decimal value.
Example:
cop=[1, 2, 3, 4] will result in following COP curve:
p(x) = 1 + 2x + 3x**2 + 4x**3,
(3) [function] in format func(*args, **kwargs)
Function should return a Polynomial that takes 'load_perc' as parameter.
min_th_power : numeric
Minimum thermal output in MW (positive value)
Notes:
------
Sign convention:
Thermal power outputs have positive values
Electric power inputs have negative values
"""
def __init__(
self,
name,
max_th_power,
cop_curve,
min_th_power=0,
):
if max_th_power < 0 or min_th_power < 0:
raise ValueError("Thermal power can not have negative values.")
if min_th_power > max_th_power:
raise ValueError("'min_th_power' can not be larger than 'max_th_power'.")
self.name = name
self.max_th_power = max_th_power
self.min_th_power = min_th_power
self.cop_curve = self._set_cop_curve(cop_curve)
def __repr__(self):
return (
f"{self.__class__.__name__}(name='{self.name}', max_thermal_power={self.max_th_power}, "
f"cop_curve={self.cop_curve}, min_th_power={self.min_th_power})"
)
# Is turning everything into a Polynomial the best solution here?
@staticmethod
def _set_cop_curve(cop_curve):
"""Generate a COP curve function based on the different input types.
Returns a function that takes *args/**kwargs and returns a Polynomial.
Not cached: list inputs are unhashable, so lru_cache would raise a TypeError here.
"""
if isinstance(cop_curve, list):
def func(*args, **kwargs):
return Polynomial(cop_curve)
return func
return cop_curve
@lru_cache(maxsize=None)
def get_cop(self, heat_output, Tsink=None, Tsource=None):
"""Get COP corresponding to certain load.
Parameters:
-----------
heat_output : numeric
Thermal load in MW
Tsink : numeric
Sink temperature in degrees Celsius
Tsource : numeric
Source temperature in degrees Celsius
Notes:
------
Sign convention:
Positive values for thermal load
Negative values for electric load
"""
load_perc = heat_output / self.max_th_power
cop_curve = self.cop_curve
if not callable(cop_curve):
return cop_curve
else:
return cop_curve(Tsink=Tsink, Tsource=Tsource)(load_perc)
def th_to_el_power(self, heat_output, Tsink=None, Tsource=None):
if not self.min_th_power <= heat_output <= self.max_th_power:
warnings.warn(
f"Chosen heat output is out of range [{self.min_th_power} - {self.max_th_power}]. "
"Heat output is being limited to the closest boundary."
)
heat_output = min(max(heat_output, self.min_th_power), self.max_th_power)
cop = self.get_cop(heat_output=heat_output, Tsink=Tsink, Tsource=Tsource)
return -heat_output / cop
def set_load(self, *args, **kwargs):
raise NotImplementedError(
"Directly setting the electric load of the heatpump is not possible (yet). "
"Functionality will be implemented if there is a specific usecase for it."
)
@lru_cache(maxsize=None)
def set_heat_output(self, heat_output, Tsink=None, Tsource=None):
"""Set heat output in MWth, returns load of heatpump as tuple (MWe, MWth)"""
if not self.min_th_power <= heat_output <= self.max_th_power:
warnings.warn(
f"Chosen heat output is out of range [{self.min_th_power} - {self.max_th_power}]. "
"Heat output is being limited to the closest boundary."
)
heat_output = min(max(heat_output, self.min_th_power), self.max_th_power)
if Tsink is not None and Tsource is not None and Tsink <= Tsource:
raise ValueError(f"Tsource '{Tsource}' can not be higher than '{Tsink}'.")
cop = self.get_cop(heat_output=heat_output, Tsink=Tsink, Tsource=Tsource)
e_load = -heat_output / cop
return e_load, heat_output
def _cost_function(self, x, c1, c2, c3, Tsink=None, Tsource=None):
"""Objective function for set_opt_load function.
x = heatpump thermal load in MW
c1 = electricity_cost
c2 = alt_heat_price
c3 = demand
"""
return (
x / self.get_cop(heat_output=x, Tsink=Tsink, Tsource=Tsource) * c1
+ (c3 - x) * c2
)
@lru_cache(maxsize=None)
def set_opt_load(
self,
electricity_cost,
alt_heat_price,
demand,
Tsink=None,
Tsource=None,
tolerance=0.01,
):
"""Set optimal load of Heatpump with minimal total heat costs.
Function uses scipy.optimize.minimize_scalar to minimize the cost function.
Parameters:
-----------
electricity_cost:
Cost of input electricity in €/MWh(e)
alt_heat_price:
Price of heat from alternative source in €/MWh(th)
demand:
Heat demand in MW(th)
Returns:
--------
Optimal load of heatpump as tuple (MWe, MWth)
"""
c1 = electricity_cost
c2 = alt_heat_price
c3 = demand
cop_curve = self.cop_curve
if isinstance(cop_curve, Number):
if c1 / cop_curve <= c2:
return self.set_heat_output(min(c3, self.max_th_power))
else:
return self.set_heat_output(self.min_th_power)
obj_func = partial(
self._cost_function, c1=c1, c2=c2, c3=c3, Tsink=Tsink, Tsource=Tsource
)
low_bound = 0
up_bound = min(c3, self.max_th_power)
opt_th_load = minimize_scalar(
obj_func,
bounds=(low_bound, up_bound),
method="bounded",
options={"xatol": tolerance},
).x
opt_e_load, opt_th_load = self.set_heat_output(
opt_th_load, Tsink=Tsink, Tsource=Tsource
)
return opt_e_load, opt_th_load
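# Illustrative example of the polynomial COP option: cop_curve=[4.0, -1.0]
# yields p(x) = 4 - x, i.e. COP 4.0 at zero thermal load and 3.0 at full load.
#
#     hp = Heatpump("hp", max_th_power=10, cop_curve=[4.0, -1.0])
#     hp.set_heat_output(5)   # COP 3.5 -> returns (-1.4285..., 5)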
class Battery(Asset):
"""Subclass for a Battery.
Battery is modeled as follows:
- Rated power is power in MW that battery can
import from and export to the grid
- Efficiency loss is applied at charging, meaning that
SoC increase when charging is lower than the SoC decrease
when discharging
"""
def __init__(
self,
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc=0,
max_soc=1,
soc_at_start=None,
cycle_lifetime=None,
):
super().__init__(name=name, max_power=rated_power, min_power=-rated_power)
self.capacity = rated_capacity
self.min_soc = min_soc
self.max_soc = max_soc
self.min_chargelevel = min_soc * self.capacity
self.max_chargelevel = max_soc * self.capacity
self.rt_eff = roundtrip_eff
self.one_way_eff = np.sqrt(roundtrip_eff)
self.cycle_count = 0
self.cycle_lifetime = cycle_lifetime
soc_at_start = min_soc if soc_at_start is None else soc_at_start
self.set_chargelevel(soc_at_start * self.capacity)
def __repr__(self):
return (
f"Battery(self, rated_power={self.max_power}, rated_capacity={self.capacity}, "
f"roundtrip_eff={self.rt_eff}, min_soc={self.min_soc}, max_soc={self.max_soc})"
)
def get_soc(self):
"""Get the SoC in % (decimal value)"""
return self.chargelevel / self.capacity
def set_chargelevel(self, chargelevel):
"""Set the chargelevel in MWh. Will automatically change the SoC accordingly."""
# if round(chargelevel,2) < round(self.min_chargelevel,2) or round(chargelevel,2) > round(self.max_chargelevel,2):
# raise ValueError(
# f"Tried to set Charge Level to {chargelevel}. "
# f"Charge Level must be a value between "
# f"{self.min_chargelevel} and {self.max_chargelevel} (in MWh)"
# )
self.chargelevel = chargelevel
def set_load(self, load):
"""Set load of the battery.
Use negative values for charging and positive values for discharging.
Returns actual chargespeed, considering technical limitations of the battery.
Note: We currently assume all efficiency losses occur during charging (no losses during discharge)
"""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = super().set_load(load)
unbound_charging = self.MW_to_MWh(load)
if load < 0:
unbound_charging *= self.rt_eff
chargelevel = self.chargelevel
max_charging = chargelevel - self.max_chargelevel
max_discharging = chargelevel - self.min_chargelevel
bound_charging = min(max(unbound_charging, max_charging), max_discharging)
newcl = chargelevel - bound_charging
self.set_chargelevel(newcl)
if bound_charging < 0:
bound_charging /= self.rt_eff
self.cycle_count += abs(bound_charging / (self.capacity * 2))
return self.MWh_to_MW(bound_charging)
def charge(self, chargespeed):
"""Charge the battery with given chargespeed.
Redirects to Battery.set_load().
Returns load (negative value for charging).
"""
chargespeed = self.max_power if chargespeed == "max" else chargespeed
if chargespeed < 0:
raise ValueError(
f"Chargespeed should be always be a positive value by convention. "
f"Inserted {chargespeed}."
)
chargespeed = self.set_load(-chargespeed)
return chargespeed
def discharge(self, dischargespeed):
"""Discharge the battery by given amount.
Redirects to Battery.set_load().
Returns load (positive value for discharging).
"""
dischargespeed = self.max_power if dischargespeed == "max" else dischargespeed
if dischargespeed < 0:
raise ValueError(
f"Dischargespeed should be always be a positive value by convention. "
f"Inserted {dischargespeed}."
)
dischargespeed = self.set_load(dischargespeed)
return dischargespeed
def get_cost_per_cycle(self):
return self.capex / self.cycle_lifetime
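# Illustrative example: with roundtrip_eff=0.81, every MWh drawn from the
# grid adds only 0.81 MWh to the charge level, while discharging is lossless.
#
#     bat = Battery("b", rated_power=4, rated_capacity=8, roundtrip_eff=0.81)
#     bat.set_freq("H")
#     bat.charge(4)        # returns -4.0 (MW, charging for one hour)
#     bat.chargelevel      # 3.24 MWh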
class EV(Battery):
def __init__(
self,
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc=0,
max_soc=1,
soc_at_start=None,
id=None,
):
super().__init__(
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc,
max_soc,
soc_at_start,
)
if id:
self.id = id
class HotWaterStorage(Battery):
"""Subclass for a storage asset.
Parameters:
-----------
rated_capacity : int/float
Rated capacity in MWh
min_buffer_level_perc : float
Minimum buffer level in %
buffer_level_at_start : float
Buffer level at start in %
"""
def __init__(
self,
name,
rated_power,
capacity_per_volume,
volume,
temperature,
min_storagelevel,
initial_storagelevel=None,
):
rated_capacity = capacity_per_volume * volume
if initial_storagelevel is None:
initial_storagelevel = min_storagelevel
soc_at_start = initial_storagelevel / rated_capacity
max_storagelevel = rated_capacity * 0.95
min_soc = min_storagelevel / rated_capacity
max_soc = max_storagelevel / rated_capacity
self.temperature = temperature
super().__init__(
name=name,
rated_power=rated_power,
rated_capacity=rated_capacity,
roundtrip_eff=1,
min_soc=min_soc,
max_soc=max_soc,
soc_at_start=soc_at_start,
)
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, rated_power={self.max_power}, capacity={self.capacity}, "
f"temperature={self.temperature}, min_storagelevel={self.min_chargelevel})"
)
@property
def charging_power_limit(self):
max_charging_energy = self.max_chargelevel - self.chargelevel
return min(self.MWh_to_MW(max_charging_energy), -self.min_power)
@property
def discharging_power_limit(self):
max_discharging_energy = self.chargelevel - self.min_chargelevel
return min(self.MWh_to_MW(max_discharging_energy), self.max_power)
class GasBoiler(Asset):
"""Representation of a Gas-fired boiler.
name : str
Unique name of the asset
max_th_output : numeric
Maximum thermal output in MW thermal
efficiency : float
Thermal efficiency of the gasboiler as decimal value.
min_th_output : numeric
Minimum thermal output in MW thermal
"""
def __init__(
self,
name,
max_th_output,
min_th_output=0,
efficiency=0.9,
):
super().__init__(name=name, max_power=max_th_output, min_power=min_th_output)
self.efficiency = efficiency
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, max_power={self.max_power}, "
f"min_power={self.min_power}, efficiency={self.efficiency})"
)
def set_load(self, *args, **kwargs):
raise NotImplementedError(
"Gasboiler does not have electric load. "
"Use Gasboiler.set_heat_output() instead."
)
@lru_cache(maxsize=None)
def set_heat_output(self, output):
"""Redirect to Gasboiler.set_load()"""
heat_output = super().set_load(output)
gas_input = -heat_output / self.efficiency
return heat_output, gas_input
class Electrolyser(Asset):
def __init__(
self,
name,
rated_power,
kwh_per_kg=60,
min_flex_load_in_perc=15,
):
min_flex_power = min_flex_load_in_perc / 100 * rated_power
super().__init__(name=name, max_power=-min_flex_power, min_power=-rated_power)
self.rated_power = rated_power
self.min_flex_load = min_flex_load_in_perc
self.min_flex_power = min_flex_power
self.kwh_per_kg = kwh_per_kg
self.kg_per_MWh = 1000 / self.kwh_per_kg
def __repr__(self):
return (
f"Electrolyser(name={self.name}, rated_power={self.rated_power}, "
f"kwh_per_kg={self.kwh_per_kg}, flex_range_in_perc=[{self.min_flex_load}, "
f"{self.max_flex_load}])"
)
def set_load(self, load):
"""Set load of the Electrolyser in MW."""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = -abs(load)
load = super().set_load(load)
h2_output_kg = self.MW_to_MWh(-load) * self.kg_per_MWh
return load, h2_output_kg
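# Illustrative example: at the default 60 kWh/kg, a 1 MW draw for one hour
# produces 1000 / 60 = ~16.67 kg of hydrogen.
#
#     ely = Electrolyser("ely", rated_power=1)
#     ely.set_freq("H")
#     ely.set_load(1)      # sign is normalized internally
#     # -> (-1.0, 16.666...)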
class Battolyser(Asset):
def __init__(
self,
name,
rated_power,
rated_capacity,
rt_eff,
soc_at_start=None,
):
super().__init__(name=name, max_power=rated_power, min_power=-rated_power)
self.capacity = rated_capacity
self.min_soc = 0.05
self.max_soc = 1.00
self.min_chargelevel = self.min_soc * self.capacity
self.max_chargelevel = self.max_soc * self.capacity
self.rt_eff = rt_eff
self.cycle_count = 0
soc_at_start = self.min_soc if soc_at_start is None else soc_at_start
self.set_chargelevel(soc_at_start * self.capacity)
def __repr__(self):
return (
f"Battolyser(name={self.name}, rated_power={self.max_power}, "
f"rated_capacity={self.capacity}, rt_eff={self.rt_eff})"
)
def get_soc(self):
"""Get the SoC in % (decimal value)"""
return self.chargelevel / self.capacity
def set_chargelevel(self, chargelevel):
"""Set the chargelevel in MWh. Will automatically change the SoC accordingly."""
if chargelevel < self.min_chargelevel or chargelevel > self.max_chargelevel:
raise ValueError(
f"Tried to set Charge Level to {chargelevel}. "
f"Charge Level must be a value between "
f"{self.min_chargelevel} and {self.max_chargelevel} (in MWh)"
)
self.chargelevel = chargelevel
def set_load(self, load):
"""Set load of the Battolyser in MW.
Use negative values for charging and positive values for discharging.
Returns actual chargespeed, considering technical limitations of the battery.
Note: We currently assume all efficiency losses occur during discharging
(no losses during charging)
"""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = super().set_load(load)
unbound_charging = self.MW_to_MWh(load)
if load > 0:
unbound_charging /= self.rt_eff
chargelevel = self.chargelevel
max_charging = chargelevel - self.max_chargelevel
max_discharging = chargelevel - self.min_chargelevel
bound_charging = min(max(unbound_charging, max_charging), max_discharging)
newcl = chargelevel - bound_charging
self.set_chargelevel(newcl)
if bound_charging > 0:
bound_charging *= self.rt_eff
charging_power = self.MWh_to_MW(bound_charging)
h2_power = -self.MWh_to_MW(max(bound_charging - unbound_charging, 0))
self.cycle_count += abs(bound_charging / (self.capacity * 2))
return charging_power, h2_power
def charge(self, chargespeed):
"""Charge the battery with given chargespeed.
Redirects to Battery.set_load().
Returns load (negative value for charging).
"""
chargespeed = self.max_power if chargespeed == "max" else chargespeed
if chargespeed < 0:
raise ValueError(
f"Chargespeed should be always be a positive value by convention. "
f"Inserted {chargespeed}."
)
chargespeed, h2_prod_in_MW = self.set_load(-chargespeed)
return chargespeed, h2_prod_in_MW
def discharge(self, dischargespeed):
"""Discharge the battery by given amount.
Redirects to Battery.set_load().
Returns load (positive value for discharging).
"""
dischargespeed = self.max_power if dischargespeed == "max" else dischargespeed
if dischargespeed < 0:
raise ValueError(
f"Dischargespeed should be always be a positive value by convention. "
f"Inserted {dischargespeed}."
)
dischargespeed = self.set_load(dischargespeed)[0]
return dischargespeed
# Added by Shahla; very similar to HotWaterStorage
class HeatBuffer(Battery):
"""Subclass for a storage asset.
Parameters:
-----------
rated_capacity : int/float
Rated capacity in MWh
min_buffer_level_perc : float
Minimum buffer level as a fraction (0-1)
buffer_level_at_start : float
Buffer level at start as a fraction (0-1)
"""
def __init__(
self, name, rated_capacity, min_buffer_level_perc, buffer_level_at_start
):
super().__init__(
name=name,
rated_power=100,
rated_capacity=rated_capacity,
roundtrip_eff=1,
min_soc=min_buffer_level_perc,
max_soc=1,
soc_at_start=buffer_level_at_start,
)

@@ -0,0 +1,11 @@
import os
from pathlib import Path
# if os.environ.get("USERNAME") == "mekre":
# BASEPATH = Path("C:\\Users\\mekre\\")
# elif os.environ.get("USERNAME") == "Karel van Doesburg":
# BASEPATH = Path("C:\\RecoyShare\\")
# elif os.environ.get("USERNAME") == "Shahla Huseynova":
# BASEPATH = Path("C:\\Users\\Shahla Huseynova\\")
# elif os.environ.get("USERNAME") == "shahla.huseynova":
BASEPATH = Path("(C:\\Users\\shahla.huseynova\\Heliox Group B.V\\")

@@ -0,0 +1,537 @@
import warnings
from copy import deepcopy
import numpy as np
import pandas as pd
from .framework import TimeFramework
from .financial import (
calc_business_case,
calc_co2_costs,
calc_electr_market_results,
calc_grid_costs,
calculate_eb_ode,
)
from .forecasts import Mipf, Qipf
from .prices import get_ets_prices, get_ttf_prices
from .converters import EURpertonCO2_to_EURperMWh
class CaseStudy:
"""
Representation of a casestudy
"""
instances = {}
def __init__(self, time_fw: TimeFramework, freq, name, data=None, forecast=None):
self.name = name
self.modelled_time_period_years = time_fw.modelled_time_period_years
self.start = time_fw.start
self.end = time_fw.end
self.freq = freq
self.dt_index = time_fw.dt_index(freq)
self.data = pd.DataFrame(index=self.dt_index)
self.assets = {}
self.cashflows = {}
self.irregular_cashflows = {}
self.capex = {}
self.total_capex = 0
self.kpis = {}
amount_of_days_in_year = 365
# Full Gregorian leap-year rule (centuries are not leap years unless divisible by 400)
if self.start.year % 4 == 0 and (self.start.year % 100 != 0 or self.start.year % 400 == 0):
amount_of_days_in_year = 366
self.year_case_duration = (self.end - self.start).total_seconds() / (
3600 * 24 * amount_of_days_in_year
)
self.days_case_duration = self.year_case_duration * amount_of_days_in_year
self.hours_case_duration = self.days_case_duration * 24
self.quarters_case_duration = self.days_case_duration * 24 * 4
self.minutes_case_duration = self.days_case_duration * 24 * 60
if data is not None:
if len(data) != len(self.data):
raise ValueError(
"Length of data is not same as length of CaseStudy.data"
)
data.index = self.dt_index
self.data = pd.concat([self.data, data], axis=1)
if forecast is not None:
self.add_forecast(forecast, freq)
CaseStudy.instances[self.name] = self
@classmethod
def list_instances(cls):
"""
Returns a list with all CaseStudy instances.
Useful if you want to iterate over all instances
or use them as input to a function.
"""
return list(cls.instances.values())
def add_forecast(self, forecast, freq):
"""
Add forecast and price data to the data table of the CaseStudy instance.
"""
# TODO Add error handling for frequencies
if forecast == "mipf" and freq == "1T":
forecast_data = Mipf(
start=self.start, end=self.end, tidy=True, include_nextQ=False
).data
elif forecast == "mipf" and freq == "15T":
forecast_data = Mipf(
start=self.start, end=self.end, tidy=False, include_nextQ=False
).data
elif forecast == "qipf":
forecast_data = Qipf(start=self.start, end=self.end, freq=self.freq).data
else:
raise ValueError("Forecast does not exist. Use 'mipf' or 'qipf'.")
self.data = pd.concat([self.data, forecast_data], axis=1)
def add_gasprices(self):
"""
Add gas price data (TTF day-ahead) to the data table of the CaseStudy instance.
"""
self.data["Gas prices (€/MWh)"] = get_ttf_prices(
start=self.start, end=self.end, freq=self.freq
)["Gas prices (€/MWh)"]
def add_co2prices(self, perMWh=False):
"""
Add CO2 prices (ETS) data to the data table of the CaseStudy instance.
"""
self.data["CO2 prices (€/ton)"] = get_ets_prices(
start=self.start, end=self.end, freq=self.freq
)["CO2 prices (€/MWh)"]
if perMWh:
self.data["CO2 prices (€/MWh)"] = EURpertonCO2_to_EURperMWh(
self.data["CO2 prices (€/ton)"]
).round(2)
def add_asset(self, asset):
"""Assign an Asset instance to CaseStudy instance.
Method will create a unique copy of the Asset instance.
If Asset contains financial information,
cashflows are automatically updated.
"""
assetcopy = deepcopy(asset)
assetcopy.set_freq(self.freq)
self.assets[assetcopy.name] = assetcopy
if hasattr(assetcopy, "opex"):
self.add_cashflow(f"{assetcopy.name} OPEX (€)", -assetcopy.opex)
if hasattr(assetcopy, "capex"):
self.add_capex(f"{assetcopy.name} CAPEX (€)", -assetcopy.capex)
if hasattr(assetcopy, "devex"):
self.add_capex(f"{assetcopy.name} DEVEX (€)", -assetcopy.devex)
def get_assets(self):
"""Returns all Asset instances assigned to CaseStudy instance."""
return list(self.assets.values())
def add_cashflow(self, label, amount):
"""Add a yearly cashflow to the CaseStudy
Convention is negative values for costs and positive values for revenue.
"""
self.cashflows[label] = round(amount, 2)
def add_capex(self, label, amount):
"""Add a capex component to the CaseStudy
Convention is negative values for costs; the amount is stored as a positive value.
"""
capex = round(amount, 2) * -1
self.capex[label] = capex
self.total_capex += capex
def add_irregular_cashflow(self, amount, year):
base = self.irregular_cashflows.get(year, 0)
self.irregular_cashflows[year] = base + amount
def generate_electr_market_results(self, real_col, nom_col=None):
"""Generates a dictionary with results of the simulation on energy market.
Dictionary is saved in CaseStudy.energy_market_results.
Total market result is automatically added to cashflow dictionary.
"""
if nom_col is None:
nom_col = "Nom. vol."
self.data[nom_col] = 0
data = calc_electr_market_results(self.data, nom_col=nom_col, real_col=real_col)
self.data = data
total_produced = data["Prod. vol."].sum()
total_consumed = -data["Cons. vol."].sum()
self.total_electricity_cons = total_consumed * (-1)
selling = data[real_col] > 0
mean_selling_price = (
data["Combined Result"].where(selling).sum() / total_produced
if total_produced != 0
else 0
)
mean_buying_price = (
data["Combined Result"].where(~selling).sum() / total_consumed * (-1)
if round(total_consumed, 2) != 0
else 0
)
total_comb_result = data["Combined Result"].sum()
self.electr_market_results = {
"Total net volume (MWh)": data[real_col].sum(),
"Total exported to grid (MWh)": total_produced,
"Total consumed from grid (MWh)": total_consumed,
"Total nominated volume (MWh)": data[nom_col].sum(),
"Absolute imbalance volume (MWh)": data["Imb. vol."].abs().sum(),
"Mean selling price (€/MWh)": mean_selling_price,
"Mean buying price (€/MWh)": mean_buying_price,
"Total day-ahead result (€)": data["Day-Ahead Result"].sum(),
"Total POS result (€)": data["POS Result"].sum(),
"Total NEG result (€)": data["NEG Result"].sum(),
"Total imbalance result (€)": data["Imbalance Result"].sum(),
"Total combined result (€)": total_comb_result,
}
self.electr_market_result = total_comb_result
self.add_cashflow("Result on electricity market (€)", total_comb_result)
def add_gas_costs(self, gasvolumes_col, gasprice_col="Gas prices (€/MWh)"):
"""Calculate gas costs and add to cashflows
Parameters:
-----------
gasvolumes_col : str
Column in CaseStudy.data containing gas volumes in MWh
gasprice_col : str
Column in CaseStudy.data containing gas prices in €/MWh
"""
gasprices = self.data[gasprice_col]
gasvolumes = self.data[gasvolumes_col].abs()
gas_costs = gasprices * gasvolumes * -1
self.data["Gas commodity costs (€)"] = gas_costs
self.total_gas_cons = gasvolumes.sum()
self.total_gas_costs = round(gas_costs.sum(), 2)
self.add_cashflow("Gas consumption costs (€)", self.total_gas_costs)
def add_co2_costs(
self, volume_cols, co2_price_col="CO2 prices (€/ton)", fuel="gas"
):
"""Calculate co2 costs and add to cashflows
Parameters:
-----------
volume_cols : str or list
Column name(s) in CaseStudy.data containing fuel volumes in MWh
co2_price_col : str
Column in CaseStudy.data containing CO2 prices in €/ton
fuel : str
Fuel type, e.g. 'gas'
"""
if isinstance(volume_cols, str):
volume_cols = [volume_cols]
co2_prices = self.data[co2_price_col]
volumes = [self.data[col] for col in volume_cols]
self.total_co2_costs = calc_co2_costs(
co2_prices=co2_prices, volumes=volumes, fuel=fuel
)
self.add_cashflow("CO2 emission costs (€)", self.total_co2_costs)
def add_eb_ode(
self,
commodity,
cons_col=None,
tax_bracket=None,
base_cons=None,
horti=False,
m3=False,
add_cons_MWh=0,
year=2020,
split=False,
):
"""Add EB & ODE to cashflows
See financial.calc_eb_ode() for more detailed documentation.
Parameters:
-----------
commodity : str
{'gas', 'electricity'}
cons_col : str
Optional parameter to specify the column name of the
consumption values in MWh.
tax_bracket : numeric
Tax bracket that the client is in [1-4]
Use either 'tax_bracket' or 'base_cons', not both.
base_cons : numeric
Base consumption volume of the client
Use either 'tax_bracket' or 'base_cons', not both.
horti : bool
Set to True to use horticulture rates
m3 : bool
Set to True if you want to enter gas volumes in m3
add_cons_MWh : numeric
Enables manually adding extra consumption in MWh
"""
if cons_col:
cons = self.data[cons_col].abs().sum()
else:
cons = getattr(self, f"total_{commodity}_cons")
cons = cons + add_cons_MWh
eb, ode = calculate_eb_ode(
cons=cons,
electr=(commodity == "electricity"),
tax_bracket=tax_bracket,
base_cons=base_cons,
horti=horti,
m3=m3,
year=year,
)
if split:
self.add_cashflow(f"EB {commodity.capitalize()} (€)", eb)
self.add_cashflow(f"ODE {commodity.capitalize()} (€)", ode)
else:
self.add_cashflow(f"{commodity.capitalize()} taxes (€)", eb + ode)
def add_grid_costs(
self,
power_MW_col,
grid_operator,
year,
connection_type,
cons_MWh_col=None,
kw_contract_kW=None,
path=None,
add_peak_kW=0,
add_cons_MWh=0,
):
"""Add variable grid transport costs to cashflows
See financial.calc_grid_costs() for more detailed documentation.
Parameters:
-----------
power_MW_col : str
Column in data table with power usage in MW
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year, e.g. 2020
connection_type : str
Connection type, e.g. 'HS'
cons_MWh_col : str
Column in data table containing grid consumption in MWh
kw_contract_kW : numeric
Contracted capacity in kW. If provided, the function assumes a fixed kW contract.
path : str
Specify path with grid tariff files. Leave empty to use default path.
add_peak_kW : float
Enables manually adding peak consumption to the data
"""
cols = [power_MW_col]
if cons_MWh_col is not None:
cols.append(cons_MWh_col)
peaks_kW = (
(self.data[power_MW_col] * 1000 - add_peak_kW)
.resample("15T")
.mean()
.abs()
.resample("M")
.max()
.to_list()
)
cons_kWh = (
self.data[cons_MWh_col].sum() * 1000 if cons_MWh_col is not None else 0
) + add_cons_MWh
self.grid_costs = calc_grid_costs(
peakload_kW=peaks_kW,
grid_operator=grid_operator,
year=year,
connection_type=connection_type,
kw_contract_kW=kw_contract_kW,
totalcons_kWh=cons_kWh,
path=path,
modelled_time_period_years=self.modelled_time_period_years,
)
total_grid_costs = sum(self.grid_costs.values())
self.add_cashflow("Grid transport costs (€)", total_grid_costs)
def calculate_ebitda(self, project_duration, residual_value=None):
"""Calculate yearly EBITDA based on cashflows
Calculation table and EBITDA value are saved in CaseStudy.
"""
for key, val in self.cashflows.items():
if np.isnan(val):
warnings.warn(
f"Cashflow '{key}' for CaseStudy '{self.name}' contains NaN value. "
"Something might have gone wrong. Replacing NaN with 0 for now."
)
self.cashflows[key] = 0
assets = self.get_assets()
for asset in assets:
if not asset.depreciate:
pass
elif asset.lifetime is None:
raise ValueError(f"'lifetime' property of {asset.name} was not set.")
elif project_duration > asset.lifetime:
warnings.warn(
f"Project duration is larger than technical lifetime of asset '{asset.name}'. "
"Will continue by limiting project duration to the technical lifetime of the asset."
)
project_duration = int(asset.lifetime)
depreciations, residual_value = CaseStudy._calc_depr_and_residual_val(
assets, self.total_capex, residual_value, project_duration
)
self.ebitda = sum(self.cashflows.values())
self.ebitda_calc = deepcopy(self.cashflows)
self.ebitda_calc["EBITDA (€)"] = self.ebitda
self.ebitda_calc["Depreciation (€)"] = depreciations * -1
self.ebitda_calc["EBITDA + depr (€)"] = self.ebitda + depreciations * -1
def calculate_business_case(
self,
project_duration,
discount_rate,
residual_value=None,
baseline=None,
bl_res_value=None,
eia=False,
vamil=False,
fixed_income_tax=False,
):
"""Calculates business case (NPV, IRR) for the CaseStudy.
Business case calculation is stored in CaseStudy.business_case
NPV is stored in CaseStudy.npv
IRR is stored in CaseStudy.irr
Parameters:
-----------
project_duration : int
In years
discount_rate : float
In % (decimal value)
residual_value : numeric
Can be used to manually set residual value of assets (all assets combined).
Defaults to None, in which case residual_value is calculated
based on linear depreciation over technical lifetime.
baseline : CaseStudy
Baseline to compare against
bl_res_value : numeric
Similar to 'residual_value' for baseline
eia : bool
Apply EIA ("Energie Investerings Aftrek") tax discounts.
Defaults to False.
vamil : bool
Apply VAMIL ("Willekeurige afschrijving milieu-investeringen") tax discounts.
Defaults to False.
"""
assets = self.get_assets()
for asset in assets:
if not asset.depreciate:
pass
elif asset.lifetime is None:
raise ValueError(f"'lifetime' property of {asset.name} was not set.")
elif project_duration > asset.lifetime:
warnings.warn(
f"Project duration is larger than technical lifetime of asset '{asset.name}'. "
"Will continue by limiting project duration to the technical lifetime of the asset."
)
project_duration = int(asset.lifetime)
capex = self.total_capex
yearly_ebitda = self.ebitda / self.modelled_time_period_years
irregular_cashflows = (
self._calc_irregular_cashflows(project_duration, baseline=baseline)
if self.irregular_cashflows
else 0
)
depreciations, residual_value = CaseStudy._calc_depr_and_residual_val(
assets, capex, residual_value, project_duration
)
if baseline is not None:
bl_assets = baseline.assets.values()
bl_capex = baseline.total_capex
bl_depr, bl_res_val = CaseStudy._calc_depr_and_residual_val(
bl_assets, bl_capex, bl_res_value, project_duration
)
capex -= bl_capex
depreciations -= bl_depr
residual_value -= bl_res_val
yearly_ebitda -= baseline.ebitda / self.modelled_time_period_years
self.business_case = calc_business_case(
capex=capex,
discount_rate=discount_rate,
project_duration=project_duration,
depreciation=depreciations,
residual_value=residual_value,
regular_earnings=yearly_ebitda,
irregular_cashflows=irregular_cashflows,
eia=eia,
vamil=vamil,
fixed_income_tax=fixed_income_tax,
)
self.irr = self.business_case.loc["IRR (%)", "Year 0"] / 100
self.npv = self.business_case.loc["NPV (€)", "Year 0"]
self.spp = self.business_case.loc["Simple Payback Period", "Year 0"]
@staticmethod
def _calc_depr_and_residual_val(assets, capex, residual_value, project_duration):
if residual_value is None:
assets = [asset for asset in assets if asset.depreciate]
depreciations = sum(
(asset.capex - asset.salvage_value) / asset.lifetime for asset in assets
)
residual_value = capex - depreciations * project_duration
else:
depreciations = (capex - residual_value) / project_duration
return depreciations, residual_value
def _calc_irregular_cashflows(self, project_duration, baseline=None):
irr_earnings = [0] * (project_duration)
for year, cashflow in self.irregular_cashflows.items():
if baseline:
cashflow -= baseline.irregular_cashflows.get(year, 0)
irr_earnings[int(year) - 1] = cashflow
return irr_earnings
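# Illustrative end-to-end sketch (the TimeFramework constructor arguments and
# the data column names are assumptions, not defined in this module):
#
#     tf = TimeFramework(start="2022-01-01", end="2023-01-01")  # assumed signature
#     case = CaseStudy(tf, freq="15T", name="battery-case")
#     case.add_asset(battery)   # deep-copies the asset and applies the case freq
#     case.generate_electr_market_results(real_col="Real. vol.")  # hypothetical column
#     case.calculate_ebitda(project_duration=10)
#     case.calculate_business_case(project_duration=10, discount_rate=0.06)
#     case.npv, case.irr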

@@ -0,0 +1,43 @@
recoy_colordict = {
"RecoyDarkBlue": "#0e293b",
"RecoyBlue": "#1f8376",
"RecoyRed": "#dd433b",
"RecoyYellow": "#f3d268",
"RecoyGreen": "#46a579",
"RecoyPurple": "#6d526b",
"RecoyOrange": "#f2a541",
"RecoyBlueGrey": "#145561",
"RecoyDarkGrey": "#2a2a2a",
"RecoyLilac": "#C3ACCE",
"RecoyBrown": "#825E52",
"RecoyLightGreen": "#7E9181",
"RecoyCitron": "#CFD186",
"RecoyPink": "#F5B3B3"
}
recoy_greysdict = {
"RecoyLightGrey": "#e6e6e6",
"RecoyGrey": "#c0c0c0",
"RecoyDarkGrey": "#2a2a2a",
}
recoydarkblue = recoy_colordict["RecoyDarkBlue"]
recoyyellow = recoy_colordict["RecoyYellow"]
recoygreen = recoy_colordict["RecoyGreen"]
recoyred = recoy_colordict["RecoyRed"]
recoyblue = recoy_colordict["RecoyBlue"]
recoyorange = recoy_colordict["RecoyOrange"]
recoypurple = recoy_colordict["RecoyPurple"]
recoybluegrey = recoy_colordict["RecoyBlueGrey"]
recoylightgrey = recoy_greysdict["RecoyLightGrey"]
recoygrey = recoy_greysdict["RecoyGrey"]
recoydarkgrey = recoy_greysdict["RecoyDarkGrey"]
recoylilac = recoy_colordict["RecoyLilac"]
recoybrown = recoy_colordict["RecoyBrown"]
recoylightgreen = recoy_colordict["RecoyLightGreen"]
recoycitron = recoy_colordict["RecoyCitron"]
recoypink = recoy_colordict["RecoyPink"]
recoycolors = list(recoy_colordict.values())
transparent = "rgba(0, 0, 0, 0)"

@@ -0,0 +1,66 @@
import pandas as pd
def MWh_to_m3(MWh):
return MWh / 9.769 * 1000
def MWh_to_GJ(MWh):
return MWh * 3.6
def EURperm3_to_EURperMWh(EURperm3):
return EURperm3 / 9.769 * 1000
def EURperMWh_to_EURperGJ(EURperMWh):
return EURperMWh * 3.6
def MWh_gas_to_tonnes_CO2(MWh):
# 1.884 kg CO2/Nm3 and 9.769 kWh/Nm3, consistent with the other conversions
return MWh * 1.884 / 9.769
def EURpertonCO2_to_EURperMWh(EURpertonCO2):
return EURpertonCO2 * 1.884 / 9.769
def EURperLHV_to_EURperHHV(MWh_LHV):
return MWh_LHV / 35.17 * 31.65
def EURperHHV_to_EURperLHV(MWh_HHV):
return MWh_HHV / 31.65 * 35.17
def GJ_gas_to_kg_NOX(GJ):
return GJ * 0.02
def MWh_gas_to_kg_NOX(MWh):
return GJ_gas_to_kg_NOX(MWh_to_GJ(MWh))
def fastround(n, decimals):
"""Round a non-negative value to a number of decimals; faster than the built-in round()."""
multiplier = 10**decimals
return int(n * multiplier + 0.5) / multiplier
def add_season_column(data):
"""Adds a column containing seasons to a DataFrame with datetime index"""
data["season"] = (data.index.month % 12 + 3) // 3
seasons = {1: "Winter", 2: "Spring", 3: "Summer", 4: "Fall"}
data["season"] = data["season"].map(seasons)
return data
def dt_column_to_local_time(column):
return column.dt.tz_localize("UTC").dt.tz_convert("Europe/Amsterdam")
def timestamp_to_utc(timestamp):
if isinstance(timestamp, str):
timestamp = pd.to_datetime(timestamp).tz_localize("Europe/Amsterdam")
return timestamp.tz_convert("UTC")
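# Illustrative conversions (9.769 kWh/Nm3 calorific value is used throughout):
#
#     MWh_to_m3(1)                   # -> 102.36... m3 of gas
#     EURpertonCO2_to_EURperMWh(25)  # -> 4.82... €/MWh of gas
#     fastround(4.8215, 2)           # -> 4.82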

@@ -0,0 +1,69 @@
from sqlalchemy import create_engine, MetaData, Table
import pandas as pd
DATABASES = {
"ngsc_dev": {
"db_url": "ngsc-dev-msql.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_dev",
"db_user": "ngsc_dev",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"ngsc_test": {
"db_url": "ngsc-test-msql.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_test",
"db_user": "ngsc_test",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"ngsc_prod": {
"db_url": "rop-ngsc-prod.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_test",
"db_user": "ngsc_test",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"rop_prices_test": {
"db_url": "rop-prices-test.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "test",
"db_user": "rop",
"db_password": "OptimalTransition",
"db_port": "8472",
},
"rop_assets_test": {
"db_url": "rop-assets-test.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "test",
"db_user": "rop",
"db_password": "OptimalTransition",
"db_port": "1433",
},
}
def db_engine(db_name):
db_config = DATABASES[db_name]
connection_string = (
f"mssql+pyodbc://{db_config['db_user']}:{db_config['db_password']}"
f"@{db_config['db_url']}:{db_config['db_port']}/"
f"{db_config['db_name']}?driver=ODBC+Driver+17+for+SQL+Server"
)
return create_engine(connection_string)
def read_entire_table(table_name, db_engine):
return pd.read_sql_table(table_name, db_engine)
def create_connection(engine, tables):
connection = engine.connect()
metadata = MetaData()
if isinstance(tables, str):
return connection, Table(tables, metadata, autoload=True, autoload_with=engine)
else:
db_tables = {
table: Table(table, metadata, autoload=True, autoload_with=engine)
for table in tables
}
return connection, db_tables
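# Illustrative usage (the table name is hypothetical; requires the
# ODBC Driver 17 for SQL Server and pyodbc to be installed):
#
#     engine = db_engine("ngsc_dev")
#     df = read_entire_table("some_table", engine)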

@@ -0,0 +1,17 @@
import time
from functools import wraps
def time_method(func):
"""Prints the runtime of a method of a class."""
@wraps(func)
def wrapper_timer(self, *args, **kwargs):
start = time.perf_counter()
value = func(self, *args, **kwargs)
end = time.perf_counter()
run_time = end - start
print(f"Finished running {self.name} in {run_time:.2f} seconds.")
return value
return wrapper_timer
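# Illustrative usage: the decorated method's owner must expose a `name`
# attribute, which appears in the timing message.
#
#     class Job:
#         def __init__(self, name):
#             self.name = name
#
#         @time_method
#         def run(self):
#             return sum(range(10_000_000))
#
#     Job("demo").run()   # prints "Finished running demo in 0.xx seconds."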

@@ -0,0 +1,667 @@
from pathlib import Path
from datetime import timedelta
import numpy as np
import numpy_financial as npf
import pandas as pd
import warnings
def npv(discount_rate, cashflows):
"""Net present value of yearly cashflows, discounted from year 1 onwards."""
cashflows = np.array(cashflows)
return (cashflows / (1 + discount_rate) ** np.arange(1, len(cashflows) + 1)).sum(
axis=0
)
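# Worked example: cashflows are discounted starting at year 1, so
# npv(0.05, [100, 100]) = 100/1.05 + 100/1.05**2 = 185.94 (rounded).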
def calc_electr_market_results(model, nom_col=None, real_col=None):
"""Function to calculate the financial result on Day-Ahead and Imbalance market for the input model.
Parameters:
-----------
model : df
DataFrame containing at least 'DAM', 'POS' and 'NEG' columns.
nom_col : str
Name of the column containing the Day-Ahead nominations in MWh
Negative values = Buy, positive values = Sell
real_col : str
Name of the column containing the realized volumes in MWh
Negative values = Buy, positive values = Sell
Returns:
--------
Original df with added columns showing the financial results per timeunit.
"""
if nom_col is None:
nom_col = "Nom. vol."
model[nom_col] = 0
producing = model[real_col] > 0
model["Prod. vol."] = model[real_col].where(producing, other=0)
model["Cons. vol."] = model[real_col].where(~producing, other=0)
model["Imb. vol."] = model[real_col] - model[nom_col]
model["Day-Ahead Result"] = model[nom_col] * model["DAM"]
model["POS Result"] = 0
model["NEG Result"] = 0
posimb = model["Imb. vol."] > 0
model["POS Result"] = model["POS"] * model["Imb. vol."].where(posimb, other=0)
model["NEG Result"] = model["NEG"] * model["Imb. vol."].where(~posimb, other=0)
model["Imbalance Result"] = model["POS Result"] + model["NEG Result"]
model["Combined Result"] = model["Day-Ahead Result"] + model["Imbalance Result"]
return model
def calc_co2_costs(co2_prices, volumes, fuel):
"""Calculates gas market results
Parameters:
-----------
co2_prices : numeric or array
CO2 prices in /ton
volumes : list
List of arrays containing volumes
fuel : list
List of arrays containing gas volumes
Returns:
--------
Returns a single negative value (=costs) in
"""
if not isinstance(volumes, list):
volumes = [volumes]
emission_factors = {
"gas": 1.884 / 9.769
} # in ton/MWh (based on 1.884 kg CO2/Nm3, 9.769 kWh/Nm3)
if fuel not in emission_factors.keys():
raise NotImplementedError(
f"Emission factor for chosen fuel '{fuel}' is not implemented."
f"Implement it by adding emission factor to the 'emission_factors' table."
)
emission_factor = emission_factors[fuel]
return -round(
abs(sum((array * emission_factor * co2_prices).sum() for array in volumes)), 2
)
def calculate_eb_ode(
cons,
electr=True,
year=2020,
tax_bracket=None,
base_cons=None,
horti=False,
m3=False,
):
"""Calculates energy tax and ODE for consumption of electricity or natural gas in given year.
Function calculates total tax to be payed for electricity or natural gas consumption,
consisting of energy tax ('Energiebelasting') and sustainable energy surcharge ('Opslag Duurzame Energie').
Tax bracket that applies is based on consumption level, with a different tax
rate for each bracket.
For Gas
1: 0 - 170.000 m3
3: 170.000 - 1 mln. m3
4: 1 mln. - 10 mln. m3
5: > 10 mln. m3
For Electricity
1: 0 - 10 MWh
2: 10 - 50 MWh
3: 50 - 10.000 MWh
4: > 10.000 MWh
Parameters:
-----------
cons : numeric
Total consumption in given year for which to calculate taxes.
Electricity consumption in MWh
Gas consumption in MWh (or m3 and use m3=True)
electr : bool
Set to False for natural gas rates. Default is True.
year : int
Year for which tax rates should be used. Tax rates are updated
annually and can differ significantly.
tax_bracket : int
Tax bracket (1-4) to assume.
Parameter can not be used in conjunction with 'base_cons'.
base_cons : numeric
Baseline consumption to assume, in same unit as 'cons'.
Specified value is used to decide what tax bracket to start in.
Taxes from baseline consumption are not included in calculation of the tax amount.
Parameter can not be used in conjunction with 'tax_bracket'.
horti : bool
The horticulture sector gets a discount on gas taxes.
m3 : bool
Set to True if you want to enter gas consumption in m3.
Default is to enter consumption in MWh.
Returns:
--------
Total tax amount as negative number (costs).
Note:
-----
This function is rather complicated, due to all its optionalities.
Should probably be simplified or split into different functions.
"""
if tax_bracket is not None and base_cons is not None:
raise ValueError(
"Parameters 'tax_bracket' and 'base_cons' can not be used at the same time."
)
if tax_bracket is None and base_cons is None:
raise ValueError(
"Function requires input for either 'tax_bracket' or 'base_cons'."
)
cons = abs(cons)
commodity = "electricity" if electr else "gas"
if commodity == "gas":
if not m3:
cons /= 9.769 / 1000 # Conversion factor for gas: 1 m3 = 9.769 kWh
base_cons = base_cons / (9.769 / 1000) if base_cons is not None else None
else:
cons *= 1000 # conversion MWh to kWh
base_cons = base_cons * 1000 if base_cons is not None else None
tax_brackets = {
"gas": [0, 170_000, 1_000_000, 10_000_000],
"electricity": [0, 10_000, 50_000, 10_000_000],
}
tax_brackets = tax_brackets[commodity]
base_cons = tax_brackets[tax_bracket - 1] if tax_bracket else base_cons
if commodity == "gas" and horti:
commodity += "_horticulture"
eb_rates, ode_rates = get_tax_tables(commodity)
eb = 0
ode = 0
for bracket in range(4):
if bracket < 3:
br_lower_limit = tax_brackets[bracket]
br_upper_limit = tax_brackets[bracket + 1]
if base_cons > br_upper_limit:
continue
bracket_size = br_upper_limit - max(br_lower_limit, base_cons)
cons_in_bracket = min(cons, bracket_size)
else:
cons_in_bracket = cons
eb += eb_rates.loc[year, eb_rates.columns[bracket]] * cons_in_bracket
ode += ode_rates.loc[year, ode_rates.columns[bracket]] * cons_in_bracket
cons -= cons_in_bracket
if cons == 0:
break
return -round(eb, 2), -round(ode, 2)
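# Illustrative example: an electricity consumer with base_cons=60 (MWh)
# starts in bracket 3 (50 - 10.000 MWh), so a further cons=100 MWh is taxed
# entirely at the bracket-3 rates for the chosen year:
#
#     eb, ode = calculate_eb_ode(cons=100, electr=True, year=2020, base_cons=60)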
def get_tax_tables(commodity):
"""Get EB and ODE tax rate tables from json files.
Returns two tax rate tables as DataFrame.
If table is not up-to-date, try use update_tax_tables() function.
"""
folder = Path(__file__).resolve().parent / "data" / "tax_tariffs"
eb_table = pd.read_json(folder / f"{commodity}_eb.json")
ode_table = pd.read_json(folder / f"{commodity}_ode.json")
if commodity == "electricity":
ode_table.drop(columns=ode_table.columns[3], inplace=True)
else:
eb_table.drop(columns=eb_table.columns[0], inplace=True)
if commodity != "gas_horticulture":
eb_table.drop(columns=eb_table.columns[3], inplace=True)
return eb_table, ode_table
def get_tax_rate(commodity, year, tax_bracket, perMWh=True):
"""Get tax rate for specific year and tax bracket.
Parameters:
-----------
commodity : str
{'gas' or 'electricity'}
year : int
{2013 - current year}
tax_bracket : int
{1 - 4}
For Gas:
1: 0 - 170.000 m3
2: 170.000 - 1 mln. m3
3: 1 mln. - 10 mln. m3
4: > 10 mln. m3
For Electricity:
1: 0 - 10 MWh
2: 10 - 50 MWh
3: 50 - 10.000 MWh
4: > 10.000 MWh
perMWh : bool
Defaults to True. Will return rates (for gas) in €/MWh instead of €/m3.
Returns:
--------
Dictionary with EB, ODE and combined rates (in €/MWh for electricity and €/m3 for gas)
{'EB' : float
'ODE' : float,
'EB+ODE' : float}
"""
eb_table, ode_table = get_tax_tables(commodity)
eb_rate = eb_table.loc[year, :].iloc[tax_bracket - 1].astype(float).round(5) * 1000
ode_rate = (
ode_table.loc[year, :].iloc[tax_bracket - 1].astype(float).round(5) * 1000
)
if commodity == "gas" and perMWh == True:
eb_rate /= 9.769
ode_rate /= 9.769
comb_rate = (eb_rate + ode_rate).round(5)
return {"EB": eb_rate, "ODE": ode_rate, "EB+ODE": comb_rate}
def update_tax_tables():
"""Function to get EB and ODE tax rate tables from belastingdienst.nl and save as json file."""
url = (
"https://www.belastingdienst.nl/wps/wcm/connect/bldcontentnl/belastingdienst/"
"zakelijk/overige_belastingen/belastingen_op_milieugrondslag/tarieven_milieubelastingen/"
"tabellen_tarieven_milieubelastingen?projectid=6750bae7-383b-4c97-bc7a-802790bd1110"
)
tables = pd.read_html(url)
table_index = {
3: "gas_eb",
4: "gas_horticulture_eb",
6: "electricity_eb",
8: "gas_ode",
9: "gas_horticulture_ode",
10: "electricity_ode",
}
for key, val in table_index.items():
table = tables[key].astype(str)
table = table.applymap(lambda x: x.strip("*"))
table = table.applymap(lambda x: x.strip(""))
table = table.applymap(lambda x: x.replace(",", "."))
table = table.astype(float)
table["Jaar"] = table["Jaar"].astype(int)
table.set_index("Jaar", inplace=True)
path = Path(__file__).resolve().parent / "data" / "tax_tariffs" / f"{val}.json"
table.to_json(path)
def calc_grid_costs(
peakload_kW,
grid_operator,
year,
connection_type,
totalcons_kWh=0,
kw_contract_kW=None,
path=None,
    modelled_time_period_years=1,
):
"""Calculate grid connection costs for one full year
Parameters:
-----------
peakload_kW : numeric or list
Peak load in kW. Can be single value (for entire year) or value per month (list).
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year to get tariffs for, e.g. 2020
connection_type : str
Type of grid connection, e.g. 'TS' or 'HS'.
Definitions are different for each grid operator.
totalcons_kWh : numeric
Total yearly consumption in kWh
kw_contract_kW : numeric
in kW. If provided, function will assume fixed value kW contract
    path : str
        Path to directory with grid tariff files.
        Default is None; the function will look for the default folder on SharePoint.
    modelled_time_period_years : numeric
        Length of the modelled time period in years. Consumption is scaled to one
        year for the tariff lookup and the resulting costs are scaled back. Defaults to 1.
    Returns:
    --------
    Dictionary with variable grid connection costs in €/year
    (fixed costs, 'vastrecht', not included)
"""
    if totalcons_kWh is not None:
        totalcons_kWh /= modelled_time_period_years
tariffs = get_grid_tariffs_electricity(grid_operator, year, connection_type, path)
kw_max_kW = np.mean(peakload_kW)
max_peakload_kW = np.max(peakload_kW)
if kw_contract_kW is None:
kw_contract_kW = False
if bool(kw_contract_kW) & (kw_contract_kW < max_peakload_kW):
warnings.warn(
"Maximum peak consumption is higher than provided 'kw_contract' value."
"Will continue to assume max peak consumption as kW contract."
)
kw_contract_kW = max_peakload_kW
    if not kw_contract_kW:
kw_contract_kW = max_peakload_kW
if (tariffs["kWh tarief"] != 0) and (totalcons_kWh is None):
raise ValueError(
"For this grid connection type a tariff for kWh has to be paid. "
"Therefore 'totalcons_kWh' can not be None."
)
return {
"Variable": -round(tariffs["kWh tarief"] * abs(totalcons_kWh) * modelled_time_period_years, 2),
"kW contract": -round(tariffs["kW contract per jaar"] * kw_contract_kW * modelled_time_period_years, 2),
"kW max": -round(tariffs["kW max per jaar"] * max_peakload_kW * modelled_time_period_years, 2),
}
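# Illustrative usage sketch (not part of the original module): grid costs for a
# connection with monthly peaks around 2 MW. 'liander' and 'MS-D' are placeholder
# inputs; valid operators and connection types depend on the csv files on disk.
def _example_calc_grid_costs():
    return calc_grid_costs(
        peakload_kW=[1800, 2000, 1900],  # peak load per month in kW
        grid_operator="liander",
        year=2021,
        connection_type="MS-D",
        totalcons_kWh=5_000_000,
    )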
def get_grid_tariffs_electricity(grid_operator, year, connection_type, path=None):
"""Get grid tranposrt tariffs
Parameters:
-----------
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year to get tariffs for, e.g. 2020
connection_type : str
Type of grid connection, e.g. 'TS' or 'HS'.
Definitions are different for each grid operator.
path : str
Path to directory with grid tariff files.
        Default is None; the function will look for the default folder on SharePoint.
Returns:
--------
Dictionary containing grid tariffs in /kW/year and /kWh
"""
if path is None:
path = Path(__file__).resolve().parent / "data" / "grid_tariffs"
else:
path = Path(path)
if not path.exists():
raise SystemError(
f"Path '{path}' not found. Specify different path and try again."
)
filename = f"{grid_operator.lower()}_{year}.csv"
filepath = path / filename
if not filepath.exists():
raise NotImplementedError(
f"File '{filename}' does not exist. Files available: {[file.name for file in path.glob('*.csv')]}"
)
rates_table = pd.read_csv(
path / filename, sep=";", decimal=",", index_col="Aansluiting"
)
if connection_type not in rates_table.index:
raise ValueError(
f"The chosen connection type '{connection_type}' is not available "
f"for grid operator '{grid_operator}'. Please choose one of {list(rates_table.index)}."
)
return rates_table.loc[connection_type, :].to_dict()
def income_tax(ebit, fixed_tax_rate):
"""
Calculates income tax based on EBIT.
    2021 Dutch corporate tax rates: 15% up to €245,000, 25% above.
    """
    if fixed_tax_rate:
        return round(ebit * -0.25, 0)
    if ebit > 245_000:
        # 15% over the first €245,000, 25% over the remainder
        return round(245_000 * -0.15 + (ebit - 245_000) * -0.25, 2)
    if ebit < 0:
        return 0
    else:
        return -round(ebit * 0.15, 2)
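# Worked example with the 2021 brackets above: an EBIT of €300,000 pays 15% over
# the first €245,000 and 25% over the remaining €55,000, so
# income_tax(300_000, fixed_tax_rate=False) == -(245_000 * 0.15 + 55_000 * 0.25)
# == -50_500.0.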
def calc_business_case(
capex,
discount_rate,
project_duration,
depreciation,
residual_value,
regular_earnings,
irregular_cashflows=0,
eia=False,
vamil=False,
fixed_income_tax=False
):
"""Calculate NPV and IRR for business case.
    All input parameters are either absolute or relative to a baseline.
Parameters:
-----------
capex : numeric
Total CAPEX or extra CAPEX compared to baseline
discount_rate : numeric
% as decimal value
project_duration : numeric
in years
    depreciation : numeric or list
        Yearly depreciation costs
    residual_value : numeric
        Residual value at end of project in €, total or compared to baseline.
regular_earnings : numeric
Regular earnings, usually EBITDA
irregular_cashflows : list
Pass list with value for each year.
eia : bool
Apply EIA ("Energie Investerings Aftrek") tax discounts.
Defaults to False.
    vamil : bool
        Apply VAMIL ("Willekeurige afschrijving milieu-investeringen") tax discounts.
        Defaults to False.
    fixed_income_tax : bool
        Use a flat 25% income tax rate instead of the 2021 bracket rates.
        Defaults to False.
Returns:
--------
DataFrame showing complete calculation resulting in NPV and IRR
"""
years = [f"Year {y}" for y in range(project_duration + 1)]
years_o = years[1:]
bc_calc = pd.DataFrame(columns=years)
bc_calc.loc["CAPEX (€)", "Year 0"] = -capex
bc_calc.loc["Regular Earnings (€)", years_o] = regular_earnings
bc_calc.loc["Irregular Cashflows (€)", years_o] = irregular_cashflows
bc_calc.loc["EBITDA (€)", years_o] = (
bc_calc.loc["Regular Earnings (€)", years_o]
+ bc_calc.loc["Irregular Cashflows (€)", years_o]
)
depreciations = [depreciation] * project_duration
if vamil:
ebitdas = bc_calc.loc["EBITDA (€)", years_o].to_list()
depreciations = _apply_vamil(depreciations, project_duration, ebitdas)
bc_calc.loc["Depreciations (€) -/-", years_o] = np.array(depreciations) * -1
bc_calc.loc["EBIT (€)", years_o] = (
bc_calc.loc["EBITDA (€)", years_o]
+ bc_calc.loc["Depreciations (€) -/-", years_o]
)
if eia:
bc_calc = _apply_eia(bc_calc, project_duration, capex, years_o)
bc_calc.loc["Income tax (Vpb.) (€)", years_o] = bc_calc.loc["EBIT (€)", :].apply(
income_tax, args=[fixed_income_tax]
)
if eia:
bc_calc.loc["NOPLAT (€)", years_o] = (
bc_calc.loc["EBIT before EIA (€)", :]
+ bc_calc.loc["Income tax (Vpb.) (€)", years_o]
)
else:
bc_calc.loc["NOPLAT (€)", years_o] = (
bc_calc.loc["EBIT (€)", :] + bc_calc.loc["Income tax (Vpb.) (€)", years_o]
)
bc_calc.loc["Depreciations (€) +/+", years_o] = depreciations
bc_calc.loc["Free Cash Flow (€)", years] = (
bc_calc.loc["CAPEX (€)", years].fillna(0)
+ bc_calc.loc["NOPLAT (€)", years].fillna(0)
+ bc_calc.loc["Depreciations (€) +/+", years].fillna(0)
)
spp = calc_simple_payback_time(
capex=capex,
free_cashflows=bc_calc.loc["Free Cash Flow (€)", years_o].values,
)
bc_calc.loc["Simple Payback Period", "Year 0"] = spp
try:
bc_calc.loc["IRR (%)", "Year 0"] = (
npf.irr(bc_calc.loc["Free Cash Flow (€)", years].values) * 100
)
    except Exception:
bc_calc.loc["IRR (%)", "Year 0"] = np.nan
bc_calc.loc["WACC (%)", "Year 0"] = discount_rate * 100
bc_calc.loc["NPV of explicit period (€)", "Year 0"] = npv(
discount_rate, bc_calc.loc["Free Cash Flow (€)"].values
)
bc_calc.loc["Discounted residual value (€)", "Year 0"] = (
residual_value / (1 + discount_rate) ** project_duration
)
bc_calc.loc["NPV (€)", "Year 0"] = (
bc_calc.loc["NPV of explicit period (€)", "Year 0"]
# + bc_calc.loc["Discounted residual value (€)", "Year 0"]
)
return bc_calc.round(2)
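# Illustrative usage sketch (not part of the original module): a 10-year case
# with €1m CAPEX, linear depreciation and constant yearly earnings. All figures
# are made-up placeholders.
def _example_calc_business_case():
    return calc_business_case(
        capex=1_000_000,
        discount_rate=0.06,
        project_duration=10,
        depreciation=100_000,
        residual_value=0,
        regular_earnings=180_000,
    )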
def calc_simple_payback_time(capex, free_cashflows):
if free_cashflows.sum() < capex:
return np.nan
year = 0
spp = 0
while capex > 0:
cashflow = free_cashflows[year]
spp += min(capex, cashflow) / cashflow
capex -= cashflow
year += 1
return round(spp, 1)
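# Worked example: a capex of 100_000 with free cashflows of 40_000 per year (as
# a numpy array) recovers 40k in each of the first two years and the remaining
# 20k halfway through the third year, so calc_simple_payback_time returns 2.5.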
def _apply_vamil(depreciations, project_duration, ebitdas):
remaining_depr = sum(depreciations)
remaining_vamil = 0.75 * remaining_depr
for i in range(project_duration):
vamil_depr = min(ebitdas[i], remaining_vamil) if remaining_vamil > 0 else 0
if remaining_depr > 0:
lin_depr = remaining_depr / (project_duration - i)
depr = max(vamil_depr, lin_depr)
depreciations[i] = max(vamil_depr, lin_depr)
remaining_vamil -= vamil_depr
remaining_depr -= depr
else:
depreciations[i] = 0
return depreciations
def _apply_eia(bc_calc, project_duration, capex, years_o):
remaining_eia = 0.45 * capex
eia_per_year = [0] * project_duration
bc_calc = bc_calc.rename(index={"EBIT (€)": "EBIT before EIA (€)"})
ebits = bc_calc.loc["EBIT before EIA (€)", years_o].to_list()
eia_duration = min(10, project_duration)
for i in range(eia_duration):
if remaining_eia > 0:
eia_curr_year = max(min(remaining_eia, ebits[i]), 0)
eia_per_year[i] = eia_curr_year
remaining_eia -= eia_curr_year
else:
break
bc_calc.loc["EIA (€)", years_o] = np.array(eia_per_year) * -1
bc_calc.loc["EBIT (€)", :] = (
bc_calc.loc["EBIT before EIA (€)", :] + bc_calc.loc["EIA (€)", :]
)
return bc_calc
def calc_irf_value(
data, irf_volume, nomination_col=None, realisation_col=None, reco_col="reco"
):
"""Calculate IRF value
Takes a DataFrame [data] and returns the same DataFrame with a new column "IRF Value"
Parameters
----------
data : DataFrame
DataFrame that contains data. Should include price data (DAM, POS and NEG).
irf_volume : int
Volume on IRF in MW.
nomination_col : str
Name of the column containing nomination data in MWh.
realisation_col : str
Name of the column containing realisation data in MWh.
reco_col : str
Name of the column contaning recommendations.
"""
if not nomination_col:
nomination_col = "zero_nom"
data[nomination_col] = 0
if not realisation_col:
realisation_col = "zero_nom"
data[realisation_col] = 0
conversion_factor = pd.to_timedelta(data.index.freq) / timedelta(hours=1)
imb_pre_irf = data[realisation_col] - data[nomination_col]
result_pre_irf = (
data[nomination_col] * data["DAM"]
+ imb_pre_irf.where(imb_pre_irf > 0, other=0) * data["POS"]
+ imb_pre_irf.where(imb_pre_irf < 0, other=0) * data["NEG"]
)
data["IRF Nom"] = (
data[nomination_col] - data[reco_col] * irf_volume * conversion_factor
)
data["IRF Imb"] = data[realisation_col] - data["IRF Nom"]
result_post_irf = (
data["IRF Nom"] * data["DAM"]
+ data["IRF Imb"].where(data["IRF Imb"] > 0, other=0) * data["POS"]
+ data["IRF Imb"].where(data["IRF Imb"] < 0, other=0) * data["NEG"]
)
data["IRF Value"] = result_post_irf - result_pre_irf
return data
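# Illustrative usage sketch (not part of the original module): IRF value over
# one synthetic hour of quarter-hours with made-up prices and recommendations.
def _example_calc_irf_value():
    idx = pd.date_range(
        "2021-01-01", periods=4, freq="15T", tz="Europe/Amsterdam"
    )
    df = pd.DataFrame(
        {"DAM": 50.0, "POS": 40.0, "NEG": 80.0, "reco": [1, 1, -1, 0]}, index=idx
    )
    return calc_irf_value(df, irf_volume=2)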

@ -0,0 +1,343 @@
import json
import os
import time
import pytz
from datetime import datetime, timedelta
from pathlib import Path
import numpy as np
import pandas as pd
import requests
from pyrecoy.prices import *
class Forecast:
"""Load dataset from SharePoint server as DataFrame in local datetime format.
Parameters:
----------
filename : str
Name of the csv file, e.g. "marketprices_nl.csv"
start : datetime
Startdate of the dataset
end : datetime
Enddate of the dataset
freq : str
{'1T', '15T', 'H'}
Time frequency of the data
    folder_path : str
        Local path to forecast data on Recoy SharePoint,
        e.g. "C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/"
        Defaults to the FORECAST_DATA_FOLDER environment variable.
    from_database : bool
        Query the Recoy price database instead of reading csv files.
        Defaults to False.
    add_days_to_start_end : bool
        Widen the requested range by one day on each side (database mode only).
        Defaults to False.
    """
    def __init__(
        self,
        filename,
        start=None,
        end=None,
        freq="15T",
        folder_path=None,
        from_database=False,
        add_days_to_start_end=False,
    ):
self.file = filename
self.from_database = from_database
        if isinstance(start, str):
            # localize instead of astimezone, so the result does not depend on
            # the system timezone
            start = pytz.timezone("Europe/Amsterdam").localize(
                datetime.strptime(start, "%Y-%m-%d")
            )
        if isinstance(end, str):
            end = pytz.timezone("Europe/Amsterdam").localize(
                datetime.strptime(end, "%Y-%m-%d")
            )
self.data = self.get_dataset(start, end, freq, folder_path=folder_path, add_days_to_start_end=add_days_to_start_end)
# print(self.data)
if len(self.data) == 0:
raise Exception("No data available for those dates.")
def get_dataset(self, start, end, freq, folder_path=None, add_days_to_start_end=False):
if folder_path is None and self.from_database:
if add_days_to_start_end:
start = start + timedelta(days=-1)
end = end + timedelta(days=1)
start = start.astimezone(pytz.utc)
end = end.astimezone(pytz.utc)
dam = get_day_ahead_prices_from_database(start, end, 'NLD')
dam = dam.resample('15T').ffill()
imb = get_imbalance_prices_from_database(start, end, 'NLD')
data = pd.concat([imb, dam], axis='columns')
data = data[['DAM', 'POS', 'NEG']]
data = data.tz_convert('Europe/Amsterdam')
# data = data.loc[(data.index >= start) & (data.index < end)]
return data
else:
if folder_path is None:
folder_path = Path(os.environ["FORECAST_DATA_FOLDER"])
else:
folder_path = Path(folder_path)
data = pd.read_csv(
folder_path / self.file,
delimiter=";",
decimal=",",
parse_dates=False,
index_col="datetime",
)
ix_start = pd.to_datetime(data.index[0], utc=True).tz_convert(
"Europe/Amsterdam"
)
ix_end = pd.to_datetime(data.index[-1], utc=True).tz_convert("Europe/Amsterdam")
new_idx = pd.date_range(ix_start, ix_end, freq=freq, tz="Europe/Amsterdam")
if len(new_idx) == len(data.index):
data.index = new_idx
else:
print(f"Warning: Entries missing from dataset '{self.file}'.")
data.index = pd.to_datetime(data.index, utc=True).tz_convert(
"Europe/Amsterdam"
)
data = data.reindex(new_idx)
print("Issue solved: Dataset was reindexed automatically.")
data.index.name = "datetime"
if start and end:
return data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")].round(2)
elif start:
return data[start.strftime("%Y-%m-%d") :].round(2)
elif end:
return data[: end.strftime("%Y-%m-%d")].round(2)
else:
return data.round(2)
def reindex_to_freq(self, freq):
"""Reindex dataset to a different timefrequency.
Parameters:
-----------
freq : string
options: '1T'
"""
ix_start = pd.to_datetime(self.data.index[0], utc=True).tz_convert(
"Europe/Amsterdam"
)
ix_end = pd.to_datetime(self.data.index[-1], utc=True).tz_convert(
"Europe/Amsterdam"
)
idx = pd.date_range(
ix_start, ix_end + timedelta(minutes=14), freq=freq, tz="Europe/Amsterdam"
)
self.data = self.data.reindex(index=idx, method="ffill")
class Mipf(Forecast):
"""Load MIPF dataset from SharePoint server as DataFrame in local datetime format.
Parameters:
----------
start : datetime
Startdate of the dataset
end : datetime
Enddate of the dataset
tidy : bool
Get a dataframe in tidy format (1 minute freq).
include_nextQ : bool
Include forecast for next Quarter hour.
Requires tidy=True to work.
folder_path : str
Local path to forecast data on Recoy SharePoint,
e.g. "C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/"
"""
def __init__(
self, start=None, end=None, tidy=True, include_nextQ=False, folder_path=None
):
self.file = "imbalanceRT_nl.csv"
super().__init__(
self.file, start=start, end=end, freq="15T", folder_path=folder_path
)
        if tidy and not self.from_database:
            self.tidy(include_nextQ=include_nextQ)
        elif self.from_database:
            # NOTE: addMipfData is referenced here but its implementation is
            # still commented out below; this branch is currently unreachable
            # because Mipf never enables from_database.
            self.addMipfData(include_nextQ=include_nextQ)
    def tidy(self, include_price_data=True, include_nextQ=False):
        self.data = tidy_mipf(self.data, include_price_data, include_nextQ)
    # def addMipfData(self, include_nextQ=False):
def tidy_mipf(data, include_price_data=True, include_nextQ=False):
"""Takes MIPF dataset (unstacked) and turns it into a tidy dataset (stacked).
Parameters:
----------
include_price_data : bool
Set as True if columns 'DAM', 'POS' and 'NEG' data should be included in the output.
include_nextQ : bool
Set to True to include next Qh forecast
"""
mipf_pos = data[[f"POS_horizon{h}" for h in np.flip(np.arange(3, 18))]].copy()
mipf_neg = data[[f"NEG_horizon{h}" for h in np.flip(np.arange(3, 18))]].copy()
cols = ["ForePos", "ForeNeg"]
dfs = [mipf_pos, mipf_neg]
if include_nextQ:
pos_nextQ = data[[f"POS_horizon{h}" for h in np.flip(np.arange(18, 30))]].copy()
neg_nextQ = data[[f"NEG_horizon{h}" for h in np.flip(np.arange(18, 30))]].copy()
for h in np.arange(30, 33):
pos_nextQ.insert(0, f"POS_horizon{h}", np.NaN)
neg_nextQ.insert(0, f"POS_horizon{h}", np.NaN)
cols += ["ForePos_nextQ", "ForeNeg_nextQ"]
dfs += [pos_nextQ, neg_nextQ]
tidy_df = pd.DataFrame()
for df, col in zip(dfs, cols):
df.columns = range(15)
df.reset_index(drop=True, inplace=True)
df.reset_index(inplace=True)
df_melt = (
df.melt(id_vars=["index"], var_name="min", value_name=col)
.sort_values(["index", "min"])
.reset_index(drop=True)
)
tidy_df[col] = df_melt[col]
ix_start = data.index[0]
ix_end = data.index[-1] + timedelta(minutes=14)
tidy_df.index = pd.date_range(ix_start, ix_end, freq="1T", tz="Europe/Amsterdam")
tidy_df.index.name = "datetime"
if include_price_data:
for col in np.flip(["DAM", "POS", "NEG", "regulation state"]):
try:
price_col = data.loc[:, col].reindex(
index=tidy_df.index, method="ffill"
)
if col == "regulation state":
price_col.name = "RS"
tidy_df = pd.concat([price_col, tidy_df], axis="columns")
except Exception as e:
print(e)
return tidy_df
class Qipf(Forecast):
"""Load QIPF dataset from SharePoint server as DataFrame in local datetime format.
Parameters:
----------
start : datetime
Startdate of the dataset
end : datetime
Enddate of the dataset
folder_path : str
Local path to forecast data on Recoy SharePoint,
e.g. "C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/"
"""
def __init__(self, start=None, end=None, freq="15T", folder_path=None):
self.file = "imbalance_nl.csv"
self.data = self.get_dataset(start, end, "15T", folder_path=folder_path)
if freq != "15T":
self.reindex_to_freq(freq)
class Irf(Forecast):
"""Load QIPF dataset from SharePoint server as DataFrame in local datetime format."""
def __init__(
self, country, horizon, start=None, end=None, freq="60T", folder_path=None
):
if freq == "15T":
self.file = f"irf_{country}_{horizon}_15min.csv"
else:
self.file = f"irf_{country}_{horizon}.csv"
self.data = self.get_dataset(start, end, freq, folder_path=folder_path)
class NsideApiRequest:
"""
Request forecast data from N-SIDE API
If request fails, code will retry 5 times by default.
Output on success: data as DataFrame, containing forecast data. Index is timezone-aware datetime (Dutch time).
Output on error: []
"""
def __init__(
self,
endpoint,
country,
start=None,
end=None,
auth_token=None,
):
if not auth_token:
try:
auth_token = os.environ["NSIDE_API_KEY"]
            except KeyError:
raise ValueError("N-SIDE token not provided.")
self.data = self.get_data(auth_token, endpoint, country, start, end)
def get_data(self, token, endpoint, country, start, end):
if start is not None:
start = pd.to_datetime(start).strftime("%Y-%m-%d")
if end is not None:
end = pd.to_datetime(end).strftime("%Y-%m-%d")
url = f"https://energy-forecasting-api.eu.n-side.com/api/forecasts/{country}/{endpoint}"
if start and end:
url += f"?from={start}&to={end}"
print(url)
headers = {"Accept": "application/json", "Authorization": f"Token {token}"}
retry = 5
self.success = False
i = 0
while i <= retry:
resp = requests.get(url, headers=headers)
self.statuscode = resp.status_code
if self.statuscode == requests.codes.ok:
self.content = resp.content
json_data = json.loads(self.content)
data = pd.DataFrame(json_data["records"])
data = data.set_index("datetime")
data.index = pd.to_datetime(data.index, utc=True).tz_convert(
"Europe/Amsterdam"
)
self.success = True
return data.sort_index()
else:
print(
f"Attempt failled, status code {str(self.statuscode)}. Trying again..."
)
time.sleep(5)
i += 1
if not self.success:
print(
"Request failed. Please contact your Recoy contact person or try again later."
)
return []

@ -0,0 +1,60 @@
import calendar
import warnings
from datetime import datetime, timedelta
import pandas as pd
import pytz
class TimeFramework:
"""
    Representation of the modelled time period.
Variables in this class are equal for all CaseStudies.
"""
def __init__(self, start, end):
        if isinstance(start, str):
start = pytz.timezone("Europe/Amsterdam").localize(
datetime.strptime(start, "%Y-%m-%d")
)
        if isinstance(end, str):
end = pytz.timezone("Europe/Amsterdam").localize(
datetime.strptime(end, "%Y-%m-%d")
)
end += timedelta(days=1)
end -= timedelta(minutes=1)
self.start = start
self.end = end
        # calendar.isleap applies the full Gregorian rule (not just year % 4)
        amount_of_days = 366 if calendar.isleap(start.year) else 365
        # + 1 minute, because `end` was moved to 23:59 of the last day above
        self.days = (self.end - self.start + timedelta(minutes=1)) / timedelta(days=1)
        self.modelled_time_period_years = (end - start).total_seconds() / (3600 * 24 * amount_of_days)
        if self.days not in (365, 366):
            warnings.warn(
                f"The chosen time period spans {self.days} days, "
"which is not a full year. Beware that certain "
"functions that use yearly rates might return "
"incorrect values."
)
def dt_index(self, freq):
# Workaround to make sure time range is always complete,
# Even with DST changes
# end = self.end + timedelta(days=1) # + timedelta(hours=1)
# end = self.end
# end - timedelta(end.hour)
return pd.date_range(
start=self.start,
end=self.end,
freq=freq,
tz="Europe/Amsterdam",
# inclusive="left",
name="datetime",
)
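# Illustrative usage sketch (not part of the original module): a full calendar
# year and its quarter-hour index.
def _example_timeframework():
    tf = TimeFramework("2022-01-01", "2022-12-31")
    idx = tf.dt_index("15T")  # tz-aware datetime index, 15-minute steps
    return tf.days, tf.modelled_time_period_years, len(idx)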

@ -0,0 +1,226 @@
import numpy as np
import pandas as pd
import warnings
from tqdm.notebook import tqdm
from .prices import get_tennet_data, get_balansdelta_nl
from .forecasts import Forecast
# TODO: This whole thing needs serious refactoring /MK
def generate_intelligent_baseline(startdate, enddate):
bd = get_balansdelta_nl(start=startdate, end=enddate)
bd.drop(
columns=[
"datum",
"volgnr",
"tijd",
"IGCCBijdrage_op",
"IGCCBijdrage_af",
"opregelen_reserve",
"afregelen_reserve",
],
inplace=True,
)
net_regelvolume = bd["opregelen"] - bd["Afregelen"]
bd.insert(2, "net_regelvolume", net_regelvolume)
vol_delta = bd["net_regelvolume"].diff(periods=1)
bd.insert(3, "vol_delta", vol_delta)
pc = get_tennet_data(
exporttype="verrekenprijzen", start=startdate, end=enddate
).reindex(index=bd.index, method="ffill")[["prikkelcomponent"]]
if len(pc) == 0:
pc = pd.Series(0, index=bd.index)
prices = Forecast("marketprices_nl.csv", start=startdate, end=enddate)
prices.reindex_to_freq("1T")
prices = prices.data
inputdata = pd.concat([prices, bd, pc], axis=1)
Qhs = len(inputdata) / 15
if Qhs % 1 > 0:
raise Exception(
"A dataset with incomplete quarter-hours was passed in, please insert new dataset!"
)
data = np.array([inputdata[col].to_numpy() for col in inputdata.columns])
lstoutput = []
for q in tqdm(range(int(Qhs))):
q_data = [col[q * 15 : (q + 1) * 15] for col in data]
q_output = apply_imbalance_logic_for_quarter_hour(q_data)
if lstoutput:
for (ix, col) in enumerate(lstoutput):
lstoutput[ix] += q_output[ix]
else:
lstoutput = q_output
ib = pd.DataFrame(
lstoutput,
index=[
"DAM",
"POS",
"NEG",
"regulation state",
"ib_inv",
"ib_afn",
"ib_rt",
"nv_op",
"nv_af",
"opgeregeld",
"afgeregeld",
],
).T
ib.index = inputdata.index
return ib
def apply_imbalance_logic_for_quarter_hour(q_data):
[nv_op, nv_af, opgeregeld, afgeregeld] = [False] * 4
lst_inv = [np.NaN] * 15
lst_afn = [np.NaN] * 15
lst_rt = [np.NaN] * 15
lst_nv_op = [np.NaN] * 15
lst_nv_af = [np.NaN] * 15
lst_afr = [np.NaN] * 15
lst_opr = [np.NaN] * 15
mins = iter(range(15))
for m in mins:
[
DAM,
POS,
NEG,
rt,
vol_op,
vol_af,
net_vol,
delta_vol,
nood_op,
nood_af,
prijs_hoog,
prijs_mid,
prijs_laag,
prikkelc,
] = [col[0 : m + 1] for col in q_data]
delta_vol[0] = 0
if nood_op.sum() > 0:
nv_op = True
if nood_af.sum() > 0:
nv_af = True
        if pd.notna(prijs_hoog).any():
            opgeregeld = True
        if pd.notna(prijs_laag).any():
            afgeregeld = True
        if opgeregeld and not afgeregeld:
            regeltoestand = 1
        elif not opgeregeld and afgeregeld:
            regeltoestand = -1
        elif not opgeregeld and not afgeregeld:
            if nv_op:
                regeltoestand = 1
            elif nv_af:
                # elif, so emergency up-regulation is not reset to 0 by the
                # else branch below
                regeltoestand = -1
            else:
                regeltoestand = 0
else:
            # Both up- and down-regulation occurred > look at the trend
            # Continuously non-decreasing: RT 1
            # Continuously decreasing: RT -1
            # No continuous trend: RT 2
if all(i >= 0 for i in delta_vol):
regeltoestand = 1
elif all(i <= 0 for i in delta_vol):
regeltoestand = -1
else:
regeltoestand = 2
        # Determine the expected imbalance prices
dam = DAM[0]
pc = prikkelc[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hoogste_prijs = np.nanmax(prijs_hoog)
mid_prijs = prijs_mid[-1]
laagste_prijs = np.nanmin(prijs_laag)
if regeltoestand == 0:
prijs_inv = mid_prijs
prijs_afn = mid_prijs
elif regeltoestand == -1:
if nv_af:
prijs_afn = np.nanmin((dam - 200, laagste_prijs))
else:
prijs_afn = laagste_prijs
prijs_inv = prijs_afn
elif regeltoestand == 1:
if nv_op:
prijs_inv = np.nanmax((dam + 200, hoogste_prijs))
else:
prijs_inv = hoogste_prijs
prijs_afn = prijs_inv
elif regeltoestand == 2:
if nv_op:
prijs_afn = np.nanmax((dam + 200, hoogste_prijs, mid_prijs))
else:
prijs_afn = np.nanmax((mid_prijs, hoogste_prijs))
if nv_af:
prijs_inv = np.nanmin((dam - 200, laagste_prijs, mid_prijs))
else:
prijs_inv = np.nanmin((mid_prijs, laagste_prijs))
prijs_inv -= pc
prijs_afn += pc
lst_inv[m] = prijs_inv
lst_afn[m] = prijs_afn
lst_rt[m] = regeltoestand
lst_nv_op[m] = nv_op
lst_nv_af[m] = nv_af
lst_opr[m] = opgeregeld
lst_afr[m] = afgeregeld
return [
list(DAM),
list(POS),
list(NEG),
list(rt),
lst_inv,
lst_afn,
lst_rt,
lst_nv_op,
lst_nv_af,
lst_opr,
lst_afr,
]

@ -0,0 +1,143 @@
import plotly.graph_objects as go
from millify import millify
from plotly import figure_factory as ff
from .colors import *
from .reports import SingleFigureComparison
def npv_bar_chart(
cases, color=recoydarkblue, title="NPV Comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "npv", "NPV (€)").report
case_names = series.index
npvs = series.values
return single_figure_barchart(npvs, case_names, title, color, n_format)
def irr_bar_chart(
cases, color=recoydarkblue, title="IRR Comparison in %", n_format="%{text:.1f}%"
):
series = SingleFigureComparison(cases, "irr", "IRR (€)").report
case_names = series.index
irrs = series.values * 100
return single_figure_barchart(irrs, case_names, title, color, n_format)
def ebitda_bar_chart(
cases, color=recoydarkblue, title="EBITDA comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "ebitda", "EBITDA (€)").report
case_names = series.index
ebitdas = series.values
return single_figure_barchart(ebitdas, case_names, title, color, n_format)
def capex_bar_chart(
cases, color=recoydarkblue, title="CAPEX comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "total_capex", "CAPEX (€)").report
case_names = series.index
capex = series.values * -1
return single_figure_barchart(capex, case_names, title, color, n_format)
def single_figure_barchart(y_values, x_labels, title, color, n_format):
fig = go.Figure()
fig.add_trace(
go.Bar(
x=x_labels,
y=y_values,
text=y_values,
marker_color=color,
cliponaxis=False,
)
)
fig.update_layout(title=title)
ymin = min(y_values.min(), 0) * 1.1
ymax = max(y_values.max(), 0) * 1.1
fig.update_yaxes(range=[ymin, ymax])
fig.update_traces(texttemplate=n_format, textposition="outside")
return fig
def heatmap(
data,
title=None,
labels=None,
colormap="reds",
mult_factor=1,
decimals=2,
min_value=None,
max_value=None,
width=600,
height=400,
hover_prefix=None,
reversescale=False,
):
data_lists = (data * mult_factor).round(decimals).values.tolist()
xs = data.columns.tolist()
ys = data.index.to_list()
annotations = (
(data * mult_factor)
.applymap(lambda x: millify(x, precision=decimals))
.values.tolist()
)
if hover_prefix:
hover_labels = [
[f"{hover_prefix} {ann}" for ann in sublist] for sublist in annotations
]
else:
hover_labels = annotations
# This is an ugly trick to fix a bug with
# the axis labels not showing correctly
xs_ = [f"{str(x)}_" for x in xs]
ys_ = [f"{str(y)}_" for y in ys]
fig = ff.create_annotated_heatmap(
data_lists,
x=xs_,
y=ys_,
annotation_text=annotations,
colorscale=colormap,
showscale=True,
text=hover_labels,
hoverinfo="text",
reversescale=reversescale,
)
# Part 2 of the bug fix
fig.update_xaxes(tickvals=xs_, ticktext=xs)
fig.update_yaxes(tickvals=ys_, ticktext=ys)
fig.layout.xaxis.type = "category"
fig.layout.yaxis.type = "category"
fig["layout"]["xaxis"].update(side="bottom")
if min_value:
fig["data"][0]["zmin"] = min_value * mult_factor
if max_value:
fig["data"][0]["zmax"] = max_value * mult_factor
if labels:
xlabel = labels[0]
ylabel = labels[1]
else:
xlabel = data.columns.name
ylabel = data.index.name
fig.update_xaxes(title=xlabel)
fig.update_yaxes(title=ylabel)
if title:
fig.update_layout(
title=title,
title_x=0.5,
title_y=0.85,
width=width,
height=height,
)
return fig
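# Illustrative usage sketch (not part of the original module): a small heatmap
# from a made-up 2x3 DataFrame.
def _example_heatmap():
    import pandas as pd  # not imported at module level in this file

    data = pd.DataFrame(
        [[1200.0, 3400.0, 5600.0], [7800.0, 9100.0, 2300.0]],
        index=["Case A", "Case B"],
        columns=["2021", "2022", "2023"],
    )
    return heatmap(data, title="Example", labels=("year", "case"))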

@ -0,0 +1,765 @@
import os
from datetime import timedelta
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
import time
import warnings
import json
import pytz
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from entsoe.entsoe import EntsoePandasClient
from sqlalchemy import MetaData, Table, insert, and_, or_
from sqlalchemy.orm import sessionmaker
from pyrecoy import *
def get_fcr_prices(start, end, freq="H") -> pd.DataFrame:
"""Get FCR settlement prices from Regelleistung website
Returns: DataFrame with FCR prices with index with given time frequency in local time.
"""
start = start + timedelta(-1)
end = end + timedelta(1)
data = get_FCR_prices_from_database(start, end, 'NLD')
data = data.resample('15T').ffill()
data = data[['PricePerMWPerISP']]
data.columns = ['FCR NL (EUR/ISP)']
data.index.name = 'datetime'
data = data.tz_convert('Europe/Amsterdam')
return data
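    # NOTE: with the database query above in place, the legacy Regelleistung
    # download below is unreachable (kept for reference).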
path = Path(
f"./data/fcr_prices_{freq}_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(
startdate, enddate, freq=freq, tz="Europe/Amsterdam", name="datetime"
)
return df
dfs = []
retry = 5
for date in pd.date_range(start=start, end=end + timedelta(days=1)):
r = 0
# print(f'DEBUG: {date}')
while r < retry:
try:
url = (
f"https://www.regelleistung.net/apps/cpp-publisher/api/v1/download/tenders/"
f"resultsoverview?date={date.strftime('%Y-%m-%d')}&exportFormat=xlsx&market=CAPACITY&productTypes=FCR"
)
df = pd.read_excel(url, engine="openpyxl")[
[
"DATE_FROM",
"PRODUCTNAME",
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
]
]
# print(f'DEBUG: {date} read in')
dfs.append(df)
break
except Exception:
# print(r)
time.sleep(1)
r += 1
warnings.warn(
f'No data received for {date.strftime("%Y-%m-%d")}. Retrying...({r}/{retry})'
)
if r == retry:
raise RuntimeError(f'No data received for {date.strftime("%Y-%m-%d")}')
df = pd.concat(dfs, axis=0)
df["hour"] = df["PRODUCTNAME"].map(lambda x: int(x.split("_")[1]))
df["Timeblocks"] = (
df["PRODUCTNAME"].map(lambda x: int(x.split("_")[2])) - df["hour"]
)
df.index = df.apply(
lambda row: pd.to_datetime(row["DATE_FROM"]) + timedelta(hours=row["hour"]),
axis=1,
).dt.tz_localize("Europe/Amsterdam")
df.drop(columns=["DATE_FROM", "PRODUCTNAME", "hour"], inplace=True)
df.rename(
columns={
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price NL [EUR/MW/{freq}]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price DE [EUR/MW/{freq}]",
},
inplace=True,
)
try:
df[f"FCR Price NL [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
df[f"FCR Price DE [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
except Exception as e:
warnings.warn(
f"Could not convert data to floats. Should check... Exception: {e}"
)
df = df[~df.index.duplicated(keep="first")]
new_ix = pd.date_range(
start=df.index[0], end=df.index[-1], freq=freq, tz="Europe/Amsterdam"
)
df = df.reindex(new_ix, method="ffill")
mult = {"H": 1, "4H": 4, "D": 24}
df[f"FCR Price NL [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df[f"FCR Price DE [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df = df[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
df.to_csv(path, sep=";", decimal=",", index_label="datetime")
return df
def get_tennet_data(exporttype, start, end):
"""Download data from TenneT API
TenneT documentation:
https://www.tennet.org/bedrijfsvoering/exporteer_data_toelichting.aspx
Parameters:
-----------
exporttype : str
Exporttype as defined in TenneT documentation.
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with API output.
"""
datefrom = start.strftime("%d-%m-%Y")
dateto = end.strftime("%d-%m-%Y")
url = (
f"http://www.tennet.org/bedrijfsvoering/ExporteerData.aspx?exporttype={exporttype}"
f"&format=csv&datefrom={datefrom}&dateto={dateto}&submit=1"
)
return pd.read_csv(url, decimal=",")
def get_imb_prices_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
exporttype = "verrekenprijzen"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["invoeden", "Afnemen", "regeltoestand"]]
data.columns = ["POS", "NEG", "RS"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def get_balansdelta_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
filename = f"balansdelta_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime")
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(startdate, enddate, freq="1T", tz="Europe/Amsterdam")
return data
exporttype = "balansdelta2017"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="1T", tz="Europe/Amsterdam")
data.index = date_ix
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_from_entsoe(start, end, marketagreement_type, entsoe_api_key):
client = EntsoePandasClient(entsoe_api_key)
return client.query_contracted_reserve_prices(
country_code="NL",
start=start,
end=end + timedelta(days=1),
type_marketagreement_type=marketagreement_type,
)
def get_afrr_capacity_fees_nl(start, end, entsoe_api_key=None):
path = Path(
f"./data/afrr_capacity_fees_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(startdate, enddate, freq="D", tz="Europe/Amsterdam")
return df
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
        except KeyError:
raise ValueError("Please enter ENTSOE API key")
date_to_daily_bids = pd.to_datetime("2020-08-31").tz_localize("Europe/Amsterdam")
if start < date_to_daily_bids:
_start = start - timedelta(days=7)
data = _get_afrr_prices_from_entsoe(
start=_start,
end=min(date_to_daily_bids, end),
marketagreement_type="A02",
entsoe_api_key=entsoe_api_key,
)[["Automatic frequency restoration reserve - Symmetric"]]
if end > date_to_daily_bids:
_end = date_to_daily_bids - timedelta(days=1)
else:
_end = end
dt_index = pd.date_range(start, _end, freq="D", tz="Europe/Amsterdam")
data = data.reindex(dt_index, method="ffill")
# ENTSOE:
# "Before week no. 1 of 2020 the values are published per period
# per MW (Currency/MW per procurement period); meaning that it
# is not divided by MTU/ISP in that period."
if start < pd.to_datetime("2019-12-23"):
data[: pd.to_datetime("2019-12-22")] /= 7 * 24 * 4
if end >= date_to_daily_bids:
_data = (
_get_afrr_prices_from_entsoe(
start=max(date_to_daily_bids, start),
end=end,
marketagreement_type="A01",
entsoe_api_key=entsoe_api_key,
)
.resample("D")
.first()
)
cols = [
"Automatic frequency restoration reserve - Down",
"Automatic frequency restoration reserve - Symmetric",
"Automatic frequency restoration reserve - Up",
]
for col in cols:
if col not in _data.columns:
_data[col] = np.NaN
_data = _data[cols]
try:
data = pd.concat([data, _data], axis=0)
except Exception:
data = _data
data = data[start:end]
new_col_names = {
"Automatic frequency restoration reserve - Down": "aFRR Down [€/MW/day]",
"Automatic frequency restoration reserve - Symmetric": "aFRR Symmetric [€/MW/day]",
"Automatic frequency restoration reserve - Up": "aFRR Up [€/MW/day]",
}
data.rename(columns=new_col_names, inplace=True)
hours_per_day = (
pd.Series(
data=0,
index=pd.date_range(
start,
end + timedelta(days=1),
freq="15T",
tz="Europe/Amsterdam",
inclusive="left",
),
)
.resample("D")
.count()
)
data = data.multiply(hours_per_day.values, axis=0).round(2)
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_nl_from_tennet(start, end):
"""Get aFRR prices from TenneT API
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with imbalance prices.
"""
filename = f"afrr_prices_nl_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(
float
)
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(
startdate, enddate, freq="15T", tz="Europe/Amsterdam"
)
return data
data = get_tennet_data("verrekenprijzen", start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, "verrekenprijzen")
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["opregelen", "Afregelen"]]
data.columns = ["price_up", "price_down"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def get_afrr_prices_nl(start, end):
bd = get_balansdelta_nl(start=start, end=end)[
["Hoogste_prijs_opregelen", "Laagste_prijs_afregelen"]
]
bd.columns = ["rt_price_UP", "rt_price_DOWN"]
afrr_prices = _get_afrr_prices_nl_from_tennet(start, end).reindex(
bd.index, method="ffill"
)
return pd.concat([afrr_prices, bd], axis=1)
def _get_index_first_and_last_entry(data, exporttype):
if exporttype == "balansdelta2017":
time_col_name = "tijd"
elif exporttype == "verrekenprijzen":
time_col_name = "periode_van"
return [
pd.to_datetime(
" ".join((data["datum"].iloc[ix], data[time_col_name].iloc[ix])),
format="%d-%m-%Y %H:%M",
)
for ix in [0, -1]
]
def _handle_missing_data_by_reindexing(data):
print("Warning: Entries missing from TenneT data.")
data.index = data[["datum", "periode_van"]].apply(lambda x: " ".join(x), axis=1)
data.index = pd.to_datetime(data.index, format="%d-%m-%Y %H:%M").tz_localize(
"Europe/Amsterdam", ambiguous=True
)
data = data[~data.index.duplicated(keep="first")]
date_ix = pd.date_range(
data.index[0], data.index[-1], freq="15T", tz="Europe/Amsterdam"
)
data = data.reindex(date_ix)
print("Workaround implemented: Dataset was reindexed automatically.")
return data
def get_imb_prices_be(startdate, enddate):
start = pd.to_datetime(startdate).tz_localize("Europe/Brussels").tz_convert("UTC")
end = (
pd.to_datetime(enddate).tz_localize("Europe/Brussels") + timedelta(days=1)
).tz_convert("UTC")
rows = int((end - start) / timedelta(minutes=15))
resp_df = pd.DataFrame()
while rows > 0:
print(f"Getting next chunk, {rows} remaining.")
chunk = min(3000, rows)
end = start + timedelta(minutes=chunk * 15)
resp_df = pd.concat([resp_df, elia_api_call(start, end)], axis=0)
start = end
rows -= chunk
resp_df.index = pd.date_range(
start=resp_df.index[0], end=resp_df.index[-1], tz="Europe/Brussels", freq="15T"
)
resp_df.index.name = "datetime"
resp_df = resp_df[
["positiveimbalanceprice", "negativeimbalanceprice", "qualitystatus"]
].rename(columns={"positiveimbalanceprice": "POS", "negativeimbalanceprice": "NEG"})
resp_df["Validated"] = False
resp_df.loc[resp_df["qualitystatus"] == "Validated", "Validated"] = True
resp_df.drop(columns=["qualitystatus"], inplace=True)
return resp_df
def elia_api_call(start, end):
dataset = "ods047"
sort_by = "datetime"
url = "https://opendata.elia.be/api/records/1.0/search/"
rows = int((end - start) / timedelta(minutes=15))
end = end - timedelta(minutes=15)
endpoint = (
f"?dataset={dataset}&q=datetime:[{start.strftime('%Y-%m-%dT%H:%M:%SZ')}"
f" TO {end.strftime('%Y-%m-%dT%H:%M:%SZ')}]&rows={rows}&sort={sort_by}"
)
for _ in range(5):
try:
resp = requests.get(url + endpoint)
if resp.ok:
break
else:
raise Exception()
except Exception:
print("retrying...")
time.sleep(1)
if not resp.ok:
raise Exception(f"Error when calling API. Status code: {resp.status_code}")
resp_json = json.loads(resp.content)
resp_json = [entry["fields"] for entry in resp_json["records"]]
df = pd.DataFrame(resp_json).set_index("datetime")
df.index = pd.to_datetime(df.index, utc=True).tz_convert("Europe/Brussels")
df = df.sort_index()
return df
def get_da_prices_from_entsoe(
start, end, country_code, tz, freq="H", entsoe_api_key=None
):
"""Get Day-Ahead prices from ENTSOE
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with day-ahead prices.
"""
    if not entsoe_api_key:
        try:
            # Read the key from the environment instead of hardcoding a secret
            entsoe_api_key = os.environ["ENTSOE_API_KEY"]
        except KeyError:
            raise ValueError("Please enter ENTSOE API key")
client = EntsoePandasClient(entsoe_api_key)
data = client.query_day_ahead_prices(
country_code, start=start, end=end + timedelta(days=1)
)
data = data[~data.index.duplicated()]
data.index = pd.date_range(data.index[0], data.index[-1], freq="H", tz=tz)
if freq != "H":
data = _reindex_to_freq(data, freq, tz)
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def _reindex_to_freq(data, freq, tz):
new_ix = pd.date_range(
data.index[0],
data.index[-1] + timedelta(hours=1),
freq=freq,
tz=tz,
)
return data.reindex(index=new_ix, method="ffill")
def get_da_prices_nl(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "NL", "Europe/Amsterdam", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_da_prices_be(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "BE", "Europe/Brussels", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_ets_prices(start, end, freq="D"):
"""Get CO2 prices (ETS) from ICE
    Values are in €/ton CO2
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with ETS settlement prices with datetime index (local time)
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ets_prices_from_database(start_x, end_x, 'NLD')
data = data.resample('1T').ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
# here = pytz.timezone("Europe/Amsterdam")
# start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
# end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
# path = Path(
# f"./data/ets_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
# )
# if path.exists():
# return _load_from_csv(path, freq=freq)
# else:
# raise Exception("Data not available for chosen dates.")
def get_ttf_prices(start, end, freq="D"):
"""Get Day-Ahead natural gas prices (TTF Day-ahead) from ICE
    Values are in €/MWh
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with TTF day-ahead prices with datetime index (local time)
Start and End are converted into start of year and end of year
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ttf_prices_from_database(start_x, end_x, 'NLD')
data = data.resample('1T').ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
# # while start_year <= end_year:
# here = pytz.timezone("Europe/Amsterdam")
# start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
# end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
# path = Path(
# f"./data/ttf_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
# )
# print(path)
# if path.exists():
# return _load_from_csv(path, freq=freq)
# else:
# raise Exception("Data not available for chosen dates.")
def _load_from_csv(filepath, freq):
data = pd.read_csv(
filepath,
delimiter=";",
decimal=",",
parse_dates=False,
index_col="datetime",
)
ix_start = pd.to_datetime(data.index[0], utc=True).tz_convert("Europe/Amsterdam")
ix_end = pd.to_datetime(data.index[-1], utc=True).tz_convert("Europe/Amsterdam")
data.index = pd.date_range(ix_start, ix_end, freq=freq, tz="Europe/Amsterdam")
return data.squeeze()
##### RECOY DATABASE QUERIES #####
def get_day_ahead_prices_from_database(start_hour, end_hour, CountryIsoCode, tz='utc'):
table = 'DayAheadPrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['HourStartTime'] >= start_hour,
table.columns['HourStartTime'] < end_hour
))
data = pd.DataFrame(data)
data['HourStartTime'] = pd.to_datetime(data['HourStartTime'], utc=True)
data.index = data['HourStartTime']
data.index.name = 'datetime'
data = data[['Price', 'CountryIsoCode']]
data.columns = ['DAM', 'CountryIsoCode']
    if tz != 'utc':
data = data.tz_convert(tz)
return data
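# Illustrative usage sketch (not part of the original module): one week of Dutch
# day-ahead prices in local time. Requires a reachable price database behind
# ENGINE_PRICES (imported via `from pyrecoy import *`).
def _example_day_ahead_from_database():
    start = pd.Timestamp("2021-01-01", tz="utc")
    end = pd.Timestamp("2021-01-08", tz="utc")
    return get_day_ahead_prices_from_database(start, end, "NLD", tz="Europe/Amsterdam")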
def get_imbalance_prices_from_database(start_quarter, end_quarter, CountryIsoCode, tz='utc'):
table = 'ImbalancePrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['QuarterStartTime'] >= start_quarter,
table.columns['QuarterStartTime'] < end_quarter
))
data = pd.DataFrame(data)
data['QuarterStartTime'] = pd.to_datetime(data['QuarterStartTime'], utc=True)
data.index = data['QuarterStartTime']
data.index.name = 'datetime'
data = data[['FeedToGridPrice', 'TakeFromGridPrice', 'CountryIsoCode']]
data.columns = ['POS', 'NEG', 'CountryIsoCode']
    if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_FCR_prices_from_database(start_day, end_day, CountryIsoCode, tz='utc'):
table = 'ReservePrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start_day,
table.columns['Timestamp'] <= end_day,
table.columns['ReserveType'] == 'FCR'
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['PricePerMWPerISP', 'CountryIsoCode']]
    if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_imbalance_forecasts_from_database_on_publication_time(start_publication_time, end_publication_time, ForecastSources, CountryIsoCodes):
table = 'ImbalancePriceForecasts'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'].in_(CountryIsoCodes),
table.columns['PublicationTime'] >= start_publication_time,
table.columns['PublicationTime'] < end_publication_time,
table.columns['ForecastSource'].in_(ForecastSources)
))
return pd.DataFrame(data)
def get_imbalance_forecasts_from_database_on_quarter_start_time(start_quarter, end_quarter, ForecastSources, CountryIsoCode):
table = 'ImbalancePriceForecasts'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['QuarterStartTime'] >= start_quarter,
table.columns['QuarterStartTime'] < end_quarter,
table.columns['PublicationTime'] < end_quarter,
table.columns['ForecastSource'].in_(ForecastSources)
))
return pd.DataFrame(data)
def get_ttf_prices_from_database(start, end, CountryIsoCode, tz='utc'):
if start.tzinfo != pytz.utc:
start = start.astimezone(pytz.utc)
if end.tzinfo != pytz.utc:
end = end.astimezone(pytz.utc)
table = 'GasPrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start,
table.columns['Timestamp'] < end
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['TTFPrice']]
data.columns = ['Gas prices (€/MWh)']
    if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_ets_prices_from_database(start, end, CountryIsoCode, tz='utc'):
if start.tzinfo != pytz.utc:
start = start.astimezone(pytz.utc)
if end.tzinfo != pytz.utc:
end = end.astimezone(pytz.utc)
table = 'Co2Prices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start,
table.columns['Timestamp'] < end
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['Price']]
    data.columns = ['CO2 prices (€/ton)']
    if tz != 'utc':
data = data.tz_convert(tz)
return data

@ -0,0 +1,792 @@
import os
from datetime import timedelta
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
import time
import warnings
import json
import pytz
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from entsoe.entsoe import EntsoePandasClient
from sqlalchemy import MetaData, Table, insert, and_, or_
from sqlalchemy.orm import sessionmaker
from pyrecoy import *
def get_fcr_prices(start, end, freq="H") -> pd.DataFrame:
"""Get FCR settlement prices from Regelleistung website
Returns: DataFrame with FCR prices with index with given time frequency in local time.
"""
start = start + timedelta(-1)
end = end + timedelta(1)
data = get_FCR_prices_from_database(start, end, "NLD")
data = data.resample("15T").ffill()
data = data[["PricePerMWPerISP"]]
data.columns = ["FCR NL (EUR/ISP)"]
data.index.name = "datetime"
data = data.tz_convert("Europe/Amsterdam")
return data
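    # NOTE: with the database query above in place, the legacy Regelleistung
    # download below is unreachable (kept for reference).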
path = Path(
f"./data/fcr_prices_{freq}_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(
startdate, enddate, freq=freq, tz="Europe/Amsterdam", name="datetime"
)
return df
dfs = []
retry = 5
for date in pd.date_range(start=start, end=end + timedelta(days=1)):
r = 0
# print(f'DEBUG: {date}')
while r < retry:
try:
url = (
f"https://www.regelleistung.net/apps/cpp-publisher/api/v1/download/tenders/"
f"resultsoverview?date={date.strftime('%Y-%m-%d')}&exportFormat=xlsx&market=CAPACITY&productTypes=FCR"
)
df = pd.read_excel(url, engine="openpyxl")[
[
"DATE_FROM",
"PRODUCTNAME",
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
]
]
# print(f'DEBUG: {date} read in')
dfs.append(df)
break
except Exception:
# print(r)
time.sleep(1)
r += 1
                warnings.warn(
                    f'No data received for {date.strftime("%Y-%m-%d")}. Retrying... ({r}/{retry})'
                )
if r == retry:
raise RuntimeError(f'No data received for {date.strftime("%Y-%m-%d")}')
df = pd.concat(dfs, axis=0)
df["hour"] = df["PRODUCTNAME"].map(lambda x: int(x.split("_")[1]))
df["Timeblocks"] = (
df["PRODUCTNAME"].map(lambda x: int(x.split("_")[2])) - df["hour"]
)
df.index = df.apply(
lambda row: pd.to_datetime(row["DATE_FROM"]) + timedelta(hours=row["hour"]),
axis=1,
).dt.tz_localize("Europe/Amsterdam")
df.drop(columns=["DATE_FROM", "PRODUCTNAME", "hour"], inplace=True)
df.rename(
columns={
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price NL [EUR/MW/{freq}]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price DE [EUR/MW/{freq}]",
},
inplace=True,
)
try:
df[f"FCR Price NL [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
df[f"FCR Price DE [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
except Exception as e:
warnings.warn(
f"Could not convert data to floats. Should check... Exception: {e}"
)
df = df[~df.index.duplicated(keep="first")]
new_ix = pd.date_range(
start=df.index[0], end=df.index[-1], freq=freq, tz="Europe/Amsterdam"
)
df = df.reindex(new_ix, method="ffill")
mult = {"H": 1, "4H": 4, "D": 24}
df[f"FCR Price NL [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df[f"FCR Price DE [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df = df[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
df.to_csv(path, sep=";", decimal=",", index_label="datetime")
return df
def get_tennet_data(exporttype, start, end):
"""Download data from TenneT API
TenneT documentation:
https://www.tennet.org/bedrijfsvoering/exporteer_data_toelichting.aspx
Parameters:
-----------
exporttype : str
Exporttype as defined in TenneT documentation.
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with API output.
"""
datefrom = start.strftime("%d-%m-%Y")
dateto = end.strftime("%d-%m-%Y")
url = (
f"http://www.tennet.org/bedrijfsvoering/ExporteerData.aspx?exporttype={exporttype}"
f"&format=csv&datefrom={datefrom}&dateto={dateto}&submit=1"
)
return pd.read_csv(url, decimal=",")
def get_imb_prices_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
exporttype = "verrekenprijzen"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["invoeden", "Afnemen", "regeltoestand"]]
data.columns = ["POS", "NEG", "RS"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def get_balansdelta_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
filename = f"balansdelta_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime")
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(startdate, enddate, freq="1T", tz="Europe/Amsterdam")
return data
exporttype = "balansdelta2017"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="1T", tz="Europe/Amsterdam")
data.index = date_ix
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_from_entsoe(start, end, marketagreement_type, entsoe_api_key):
client = EntsoePandasClient(entsoe_api_key)
return client.query_contracted_reserve_prices(
country_code="NL",
start=start,
end=end + timedelta(days=1),
type_marketagreement_type=marketagreement_type,
)
def get_afrr_capacity_fees_nl(start, end, entsoe_api_key=None):
path = Path(
f"./data/afrr_capacity_fees_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(startdate, enddate, freq="D", tz="Europe/Amsterdam")
return df
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
        except KeyError:
raise ValueError("Please enter ENTSOE API key")
date_to_daily_bids = pd.to_datetime("2020-08-31").tz_localize("Europe/Amsterdam")
if start < date_to_daily_bids:
_start = start - timedelta(days=7)
data = _get_afrr_prices_from_entsoe(
start=_start,
end=min(date_to_daily_bids, end),
marketagreement_type="A02",
entsoe_api_key=entsoe_api_key,
)[["Automatic frequency restoration reserve - Symmetric"]]
if end > date_to_daily_bids:
_end = date_to_daily_bids - timedelta(days=1)
else:
_end = end
dt_index = pd.date_range(start, _end, freq="D", tz="Europe/Amsterdam")
data = data.reindex(dt_index, method="ffill")
# ENTSOE:
# "Before week no. 1 of 2020 the values are published per period
# per MW (Currency/MW per procurement period); meaning that it
# is not divided by MTU/ISP in that period."
if start < pd.to_datetime("2019-12-23").tz_localize("Europe/Amsterdam"):
data[:"2019-12-22"] /= 7 * 24 * 4
if end >= date_to_daily_bids:
_data = (
_get_afrr_prices_from_entsoe(
start=max(date_to_daily_bids, start),
end=end,
marketagreement_type="A01",
entsoe_api_key=entsoe_api_key,
)
.resample("D")
.first()
)
cols = [
"Automatic frequency restoration reserve - Down",
"Automatic frequency restoration reserve - Symmetric",
"Automatic frequency restoration reserve - Up",
]
for col in cols:
if col not in _data.columns:
_data[col] = np.NaN
_data = _data[cols]
try:
data = pd.concat([data, _data], axis=0)
except Exception:
data = _data
data = data[start:end]
new_col_names = {
"Automatic frequency restoration reserve - Down": "aFRR Down [€/MW/day]",
"Automatic frequency restoration reserve - Symmetric": "aFRR Symmetric [€/MW/day]",
"Automatic frequency restoration reserve - Up": "aFRR Up [€/MW/day]",
}
data.rename(columns=new_col_names, inplace=True)
isps_per_day = (
pd.Series(
data=0,
index=pd.date_range(
start,
end + timedelta(days=1),
freq="15T",
tz="Europe/Amsterdam",
inclusive="left",
),
)
.resample("D")
.count()
)
data = data.multiply(isps_per_day.values, axis=0).round(2)
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
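# Unit sketch: ENTSO-E publishes capacity prices per MW per ISP
# (15 minutes), so a regular day converts as price_per_ISP * 96;
# DST transition days count 92 or 100 ISPs, which is why the ISP count
# above is derived from a quarter-hourly index instead of a constant.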
def _get_afrr_prices_nl_from_tennet(start, end):
"""Get aFRR prices from TenneT API
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with aFRR up/down regulation prices (price_up, price_down).
"""
filename = f"afrr_prices_nl_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(
float
)
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(
startdate, enddate, freq="15T", tz="Europe/Amsterdam"
)
return data
data = get_tennet_data("verrekenprijzen", start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, "verrekenprijzen")
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["opregelen", "Afregelen"]]
data.columns = ["price_up", "price_down"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def get_afrr_prices_nl(start, end):
"""Combine quarter-hourly aFRR prices with minute-level balance-delta price extremes."""
bd = get_balansdelta_nl(start=start, end=end)[
["Hoogste_prijs_opregelen", "Laagste_prijs_afregelen"]
]
bd.columns = ["rt_price_UP", "rt_price_DOWN"]
afrr_prices = _get_afrr_prices_nl_from_tennet(start, end).reindex(
bd.index, method="ffill"
)
return pd.concat([afrr_prices, bd], axis=1)
def _get_index_first_and_last_entry(data, exporttype):
if exporttype == "balansdelta2017":
time_col_name = "tijd"
elif exporttype == "verrekenprijzen":
time_col_name = "periode_van"
return [
pd.to_datetime(
" ".join((data["datum"].iloc[ix], data[time_col_name].iloc[ix])),
format="%d-%m-%Y %H:%M",
)
for ix in [0, -1]
]
def _handle_missing_data_by_reindexing(data):
print("Warning: Entries missing from TenneT data.")
data.index = data[["datum", "periode_van"]].apply(lambda x: " ".join(x), axis=1)
data.index = pd.to_datetime(data.index, format="%d-%m-%Y %H:%M").tz_localize(
"Europe/Amsterdam", ambiguous=True
)
data = data[~data.index.duplicated(keep="first")]
date_ix = pd.date_range(
data.index[0], data.index[-1], freq="15T", tz="Europe/Amsterdam"
)
data = data.reindex(date_ix)
print("Workaround implemented: Dataset was reindexed automatically.")
return data
def get_imb_prices_be(startdate, enddate):
"""Get Belgian imbalance prices (POS/NEG) from the Elia open-data API."""
start = pd.to_datetime(startdate).tz_localize("Europe/Brussels").tz_convert("UTC")
end = (
pd.to_datetime(enddate).tz_localize("Europe/Brussels") + timedelta(days=1)
).tz_convert("UTC")
rows = int((end - start) / timedelta(minutes=15))
resp_df = pd.DataFrame()
while rows > 0:
print(f"Getting next chunk, {rows} remaining.")
chunk = min(3000, rows)
end = start + timedelta(minutes=chunk * 15)
resp_df = pd.concat([resp_df, elia_api_call(start, end)], axis=0)
start = end
rows -= chunk
resp_df.index = pd.date_range(
start=resp_df.index[0], end=resp_df.index[-1], tz="Europe/Brussels", freq="15T"
)
resp_df.index.name = "datetime"
resp_df = resp_df[
["positiveimbalanceprice", "negativeimbalanceprice", "qualitystatus"]
].rename(columns={"positiveimbalanceprice": "POS", "negativeimbalanceprice": "NEG"})
resp_df["Validated"] = False
resp_df.loc[resp_df["qualitystatus"] == "Validated", "Validated"] = True
resp_df.drop(columns=["qualitystatus"], inplace=True)
return resp_df
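# Chunking sketch: the Elia endpoint is queried in blocks of at most
# 3000 quarter-hours, i.e. 3000 / 96 = 31.25 days per request, so a
# full year of imbalance prices takes about a dozen calls.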
def elia_api_call(start, end):
dataset = "ods047"
sort_by = "datetime"
url = "https://opendata.elia.be/api/records/1.0/search/"
rows = int((end - start) / timedelta(minutes=15))
end = end - timedelta(minutes=15)
endpoint = (
f"?dataset={dataset}&q=datetime:[{start.strftime('%Y-%m-%dT%H:%M:%SZ')}"
f" TO {end.strftime('%Y-%m-%dT%H:%M:%SZ')}]&rows={rows}&sort={sort_by}"
)
resp = None
for _ in range(5):
try:
resp = requests.get(url + endpoint)
if resp.ok:
break
raise Exception()
except Exception:
print("retrying...")
time.sleep(1)
if resp is None or not resp.ok:
raise Exception(
"Error when calling API. Status code: "
f"{'n/a' if resp is None else resp.status_code}"
)
resp_json = json.loads(resp.content)
resp_json = [entry["fields"] for entry in resp_json["records"]]
df = pd.DataFrame(resp_json).set_index("datetime")
df.index = pd.to_datetime(df.index, utc=True).tz_convert("Europe/Brussels")
df = df.sort_index()
return df
def get_da_prices_from_entsoe(
start, end, country_code, tz, freq="H", entsoe_api_key=None
):
"""Get Day-Ahead prices from ENTSOE
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
country_code : str
Country code, e.g. 'NL'
tz : str
Timezone of the returned index, e.g. 'Europe/Amsterdam'
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with day-ahead prices.
"""
if not entsoe_api_key:
try:
entsoe_api_key = "f6c67fd5-e423-47bc-8a3c-98125ccb645e"
except:
raise ValueError("Please enter ENTSOE API key")
client = EntsoePandasClient(entsoe_api_key)
data = client.query_day_ahead_prices(
country_code, start=start, end=end + timedelta(days=1)
)
data = data[~data.index.duplicated()]
data.index = pd.date_range(data.index[0], data.index[-1], freq="H", tz=tz)
if freq != "H":
data = _reindex_to_freq(data, freq, tz)
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def _reindex_to_freq(data, freq, tz):
new_ix = pd.date_range(
data.index[0],
data.index[-1] + timedelta(hours=1),
freq=freq,
tz=tz,
)
return data.reindex(index=new_ix, method="ffill")
def get_da_prices_nl(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "NL", "Europe/Amsterdam", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_da_prices_be(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "BE", "Europe/Brussels", freq=freq, entsoe_api_key=entsoe_api_key
)
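# Usage sketch (assumed dates): hourly day-ahead prices, forward-filled
# to quarter-hours when freq="15T":
#
#     start = pd.Timestamp("2021-06-01", tz="Europe/Amsterdam")
#     end = pd.Timestamp("2021-06-30", tz="Europe/Amsterdam")
#     dam = get_da_prices_nl(start, end, freq="15T")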
def get_ets_prices(start, end, freq="D"):
"""Get CO2 prices (ETS) from ICE
Values are in €/ton CO2
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with ETS settlement prices with datetime index (local time)
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
try:
data = get_ets_prices_from_database(start_x, end_x, "NLD")
data = data.resample("1T").ffill()
return data.loc[(data.index >= start) & (data.index < end)]
except Exception:
# Fallback: load a cached CSV export (one file per calendar year)
# if the database query fails.
here = pytz.timezone("Europe/Amsterdam")
start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
path = Path(
f"./data/ets_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
)
if path.exists():
return _load_from_csv(path, freq=freq)
raise Exception("Data not available for chosen dates.")
def get_ttf_prices(start, end, freq="D"):
"""Get Day-Ahead natural gas prices (TTF Day-ahead) from ICE
Values are in €/MWh
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with TTF day-ahead prices with datetime index (local time).
The CSV fallback is cached per calendar year of the start date.
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
try:
data = get_ttf_prices_from_database(start_x, end_x, "NLD")
data = data.resample("1T").ffill()
return data.loc[(data.index >= start) & (data.index < end)]
except Exception:
# Fallback: load a cached CSV export (one file per calendar year)
# if the database query fails.
here = pytz.timezone("Europe/Amsterdam")
start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
path = Path(
f"./data/ttf_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
)
if path.exists():
return _load_from_csv(path, freq=freq)
raise Exception("Data not available for chosen dates.")
def _load_from_csv(filepath, freq):
data = pd.read_csv(
filepath,
delimiter=";",
decimal=",",
parse_dates=False,
index_col="datetime",
)
ix_start = pd.to_datetime(data.index[0], utc=True).tz_convert("Europe/Amsterdam")
ix_end = pd.to_datetime(data.index[-1], utc=True).tz_convert("Europe/Amsterdam")
data.index = pd.date_range(ix_start, ix_end, freq=freq, tz="Europe/Amsterdam")
return data.squeeze()
##### RECOY DATABASE QUERIES #####
def convert_columns_to_localized_datetime_from_utc(df, columns, tz):
for column in columns:
df[column] = pd.to_datetime(df[column], utc=True)
df[column] = df[column].dt.tz_convert(tz)
return df
def get_price_data_from_database(
database_name,
time_index_column,
database_columns,
rename_columns,
start,
end,
CountryIsoCode,
tz="utc",
to_datetime_columns=[],
):
"""_summary_
Args:
database_name (string): name of the database
time_index_column (string): column which is converted to a datetime column and used as the index
database_columns (list of strings): columns of the database table you want to query
rename_columns (list of strings): new names for the columns which are queried
start (string or datetime): start time of the data you want to select based on the time_index_column
end (string or datetime): end time of the data you want to select based on the time_index_column
CountryIsoCode (string): CountryIsoCode of the data
tz (str, optional): Timezone you want the datatime columns to be converted to
to_datetime_columns (list, optional): Additional columns which are transferred to datetime columns. Defaults to [].
Returns:
_type_: _description_
"""
table = database_name
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(
and_(
table.columns["CountryIsoCode"] == CountryIsoCode,
table.columns[time_index_column] >= start,
table.columns[time_index_column] < end,
)
)
data = pd.DataFrame(data)
data[time_index_column] = pd.to_datetime(data[time_index_column], utc=True)
data.index = data[time_index_column]
data.index.name = "datetime"
data = data[database_columns + ["CountryIsoCode"]]
data.columns = rename_columns + ["CountryIsoCode"]
if tz != "utc":
data = data.tz_convert(tz)
data = convert_columns_to_localized_datetime_from_utc(data, to_datetime_columns, tz)
return data
def get_day_ahead_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"DayAheadPrices",
"HourStartTime",
["Price"],
["DAM"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_imbalance_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"ImbalancePrices",
"QuarterStartTime",
["FeedToGridPrice", "TakeFromGridPrice"],
["POS", "NEG"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_imbalance_forecasts_from_database_on_publication_time(
start, end, CountryIsoCode, tz="utc"
):
return get_price_data_from_database(
"ImbalancePriceForecasts",
"PublicationTime",
["PublicationTime", "QuarterStartTime", "FeedToGridPrice", "TakeFromGridPrice"],
["PublicationTime", "QuarterStartTime", "ForePos", "ForeNeg"],
start,
end,
CountryIsoCode,
tz=tz,
to_datetime_columns=["QuarterStartTime"],
)
def get_imbalance_forecasts_from_database_on_quarter_start_time(
start, end, CountryIsoCode, tz="utc"
):
return get_price_data_from_database(
"ImbalancePriceForecasts",
"QuarterStartTime",
["PublicationTime", "QuarterStartTime", "FeedToGridPrice", "TakeFromGridPrice"],
["PublicationTime", "QuarterStartTime", "ForePos", "ForeNeg"],
start,
end,
CountryIsoCode,
tz=tz,
to_datetime_columns=["QuarterStartTime"],
)
def get_ttf_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"GasPrices",
"DeliveryDate",
["Price"],
["Gas prices (€/MWh)"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_ets_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"Co2Prices",
"DeliveryDate",
["Price"],
["CO2 prices (€/MWh)"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_reserve_prices_from_database(
start, end, reserve_type, CountryIsoCode, tz="utc"
):
data = get_price_data_from_database(
"ReservePrices",
"Timestamp",
["PricePerMWPerISP", "ReserveType"],
["PricePerMWPerISP", "ReserveType"],
start,
end,
CountryIsoCode,
tz=tz,
)
data = data.loc[data["ReserveType"] == reserve_type]
return data
def get_FCR_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(start, end, "FCR", CountryIsoCode, tz=tz)
def get_aFRR_up_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "aFRR Up", CountryIsoCode, tz=tz
)
def get_aFRR_down_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "aFRR Down", CountryIsoCode, tz=tz
)
def get_mFRR_up_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "mFRR Up", CountryIsoCode, tz=tz
)
def get_mFRR_down_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "mFRR Down", CountryIsoCode, tz=tz
)

@ -0,0 +1,156 @@
import numpy as np
import pandas as pd
from .styling import businesscase_formatter, num_formatting, perc_formatting
class CaseReport:
"""Dataframe report showing KPIs for specific CaseStudy.
Parameters:
-----------
case : CaseStudy
kind : str
The report type. {electr_market_results', cashflows', 'ebitda_calc'}.
baseline : CaseStudy
include_perc: bool
"""
def __init__(self, case, kind):
self._check_if_attr_exists(case, kind)
case_data = getattr(case, kind)
self.report = self.create_report(case.name, case_data)
self.formatting = "number"
def _check_if_attr_exists(self, case, kind):
if not hasattr(case, kind):
raise AttributeError(
f"Attribute '{kind}' is not available for '{case.name}' case. "
"You should first generate it using "
"the appropriate CaseStudy method."
)
def create_report(self, case_name, case_data):
if isinstance(case_data, dict):
case_data = pd.Series(case_data)
return pd.DataFrame(case_data, columns=[case_name])
def show(self, presentation_format=True):
if not presentation_format:
return self.report
if self.formatting == "percentage":
return self.report.applymap(perc_formatting)
else:
return self.report.applymap(num_formatting)
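# Usage sketch (my_case is a hypothetical CaseStudy instance that has
# already generated the requested KPI attribute):
#
#     report = CaseReport(case=my_case, kind="ebitda_calc")
#     report.show()                            # formatted strings
#     report.show(presentation_format=False)   # raw numbers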
class ComparisonReport(CaseReport):
"""Dataframe report showing a copmarison of KPIs between CaseStudy instances.
Parameters:
-----------
cases : list
List of CaseStudy instances
kind : str
Type of report
baseline : CaseStudy
CaseStudy instance to use as baseline
comparison : str
{'absolute', 'relative', 'percentage'}
Sets how the numbers in the comparison are in relation to the baseline.
"""
def __init__(self, cases, kind, baseline=None, comparison="absolute"):
case_reports = []
self.formatting = "number"
for case in cases:
case_report = CaseReport(case=case, kind=kind).report
case_reports.append(case_report)
self.report = pd.concat(case_reports, axis=1).fillna(0)
if comparison == "relative":
self._comp_relative(baseline)
elif comparison == "percentage":
self._comp_percentage(baseline)
# ugly fix to make sure EBITDA is at the bottom when df is printed
if kind == "ebitda_calc":
ix = self.report.index.to_list()
ix.remove("EBITDA (€)")
ix.remove("Depreciation (€)")
ix.remove("EBITDA + depr (€)")
ix.append("EBITDA (€)")
ix.append("Depreciation (€)")
ix.append("EBITDA + depr (€)")
self.report = self.report.reindex(ix)
def _comp_relative(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.subtract(baseline_report, axis=0)
if baseline.name in self.report.columns:
self.report.drop(columns=baseline.name, inplace=True)
if baseline.name in self.report.index:
self.report.drop(index=baseline.name, inplace=True)
self.formatting = "number"
def _comp_percentage(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.divide(baseline_report / 100, axis=0).replace(
[-np.inf, np.inf], 0
)
self.formatting = "percentage"
class BusinessCaseReport(CaseReport):
"""Show business case for CaseStudy"""
def __init__(self, case, presentation_format=False):
self._check_if_attr_exists(case, "business_case")
self.report = getattr(case, "business_case")
def show(self, presentation_format=True):
if presentation_format:
return businesscase_formatter(self.report)
else:
return self.report
class SingleFigureComparison(ComparisonReport):
def __init__(
self,
cases,
kpi,
label,
baseline=None,
comparison="absolute",
):
figure_dict = {}
for case in cases:
self._check_if_attr_exists(case, kpi)
figure_dict[case.name] = getattr(case, kpi)
self.report = pd.Series(figure_dict, name=label)
if comparison == "relative":
self._comp_relative(baseline)
elif comparison == "percentage":
self._comp_percentage(baseline)
def show(self, nformat=None):
if nformat is not None:
return self.report.apply(nformat.format)
else:
return self.report
def _comp_relative(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.subtract(baseline_report, axis=0)
self.report.drop(index=baseline.name, inplace=True)
self.formatting = "number"

@ -0,0 +1,34 @@
from sqlalchemy import select
import pandas as pd
from .converters import dt_column_to_local_time, timestamp_to_utc
from .databases import db_engine, create_connection
def get_power_profiles(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_test")
connection, table = create_connection(engine, "ImbalancePrices")
start = start.floor("15T")
query = (
select([table])
.where(
table.columns.QuarterStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.QuarterStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.QuarterStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["QuarterStartTime"] = dt_column_to_local_time(data["QuarterStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(
columns={
"QuarterStartTime": "datetime",
"TakeFromGridPrice": "NEG",
"FeedToGridPrice": "POS",
},
inplace=True,
)
return data.set_index("datetime")[["POS", "NEG"]]

@ -0,0 +1,92 @@
from sqlalchemy import select
import pandas as pd
from .converters import dt_column_to_local_time, timestamp_to_utc
from .databases import db_engine, create_connection
def get_imbalance_prices(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_prices_test")
connection, table = create_connection(engine, "ImbalancePrices")
start = start.floor("15T")
query = (
select([table])
.where(
table.columns.QuarterStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.QuarterStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.QuarterStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["QuarterStartTime"] = dt_column_to_local_time(data["QuarterStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(
columns={
"QuarterStartTime": "datetime",
"TakeFromGridPrice": "NEG",
"FeedToGridPrice": "POS",
},
inplace=True,
)
return data.set_index("datetime")[["POS", "NEG"]]
def get_dayahead_prices(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_prices_test")
connection, table = create_connection(engine, "DayAheadPrices")
start = start.floor("60T")
query = (
select([table])
.where(
table.columns.HourStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.HourStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.HourStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["HourStartTime"] = dt_column_to_local_time(data["HourStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(columns={"HourStartTime": "datetime", "Price": "DAM"}, inplace=True)
return data.set_index("datetime")
def get_market_price_data(start, end, country, in_local_time=True):
tz = "Europe/Amsterdam" if in_local_time else "UTC"
dt_ix = pd.date_range(
start=start.floor("H"),
end=end.ceil("H"),
freq="15T",
tz=tz,
inclusive="left",
)
prices = pd.DataFrame(index=dt_ix, columns=["DAM", "POS", "NEG"])
prices["DAM"] = get_dayahead_prices(
start, end, country=country, in_local_time=in_local_time
).reindex(dt_ix, method="ffill")
prices["DAM"].fillna(method="ffill", inplace=True)
imbprices = get_imbalance_prices(
start, end, country=country, in_local_time=in_local_time
)
prices["POS"] = imbprices["POS"]
prices["NEG"] = imbprices["NEG"]
return prices
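# Combined usage sketch (hypothetical period): one quarter-hourly frame
# with DAM, POS and NEG columns, ready for dispatch simulations:
#
#     start = pd.Timestamp("2022-01-01", tz="Europe/Amsterdam")
#     end = pd.Timestamp("2022-01-08", tz="Europe/Amsterdam")
#     prices = get_market_price_data(start, end, country="NLD")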

@ -0,0 +1,225 @@
import itertools
import gc
from copy import deepcopy
import warnings
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
from millify import millify
from plotly.graph_objs import Figure
from .casestudy import CaseStudy
from .colors import recoygreen, recoyred
class SensitivityAnalysis:
"""
Runs a simulation routine with different input configurations,
so that the sensitivity of output KPIs to an input parameter can be analysed.
"""
def __init__(self, c, s, routine, param, values, output_kpis):
self.configs = self._generate_configs(c, param, values)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, param, values):
configs = {}
for value in values:
_c = deepcopy(c)
setattr(_c, param, value)
configs[value] = _c
return configs
def _prepare_output_dict(self, cases, output_kpis):
output_dict = dict.fromkeys(self.configs.keys())
for value in self.configs:
output_dict[value] = dict.fromkeys(output_kpis)
for kpi in output_kpis:
output_dict[value][kpi] = dict.fromkeys([case.name for case in cases])
return output_dict
def _run_sensitivities(self, s, routine, output_kpis, output_dict):
for name, c in tqdm(self.configs.items()):
_s = deepcopy(s)
_s = routine(c, _s)
for kpi in output_kpis:
for case in _s.cases:
output_dict[name][kpi][case.name] = getattr(case, kpi, np.nan)
del _s
gc.collect()
return output_dict
def single_kpi_overview(self, kpi, case_names=None):
"""Creates a DataFrame with chosen output kpi,
for each CaseStudy in each Configuration.
"""
if not case_names:
case_names = CaseStudy.instances.keys()
kpi_values = {
name: {case: self.kpis[name][kpi][case] for case in case_names}
for name in self.kpis.keys()
}
return pd.DataFrame(kpi_values).T
def cashflows_comparison(self, case=None, baseline=None):
ebitda_calc_overview = {}
baseline_calc = {}
for input_value, kpi_data in self.kpis.items():
for kpi, case_data in kpi_data.items():
for case_name, data in case_data.items():
if kpi == "cashflows":
if case_name == case:
ebitda_calc_overview[input_value] = data
if case_name == baseline:
baseline_calc[input_value] = data
ebitda_calc_overview = pd.DataFrame(ebitda_calc_overview)
if not baseline:
return ebitda_calc_overview
baseline_calc = pd.DataFrame(baseline_calc)
return ebitda_calc_overview.subtract(baseline_calc, fill_value=0)
class SensitivityMatrix:
def __init__(self, c, s, routine, x_param, y_param, x_vals, y_vals, output_kpis):
self.x_param = x_param
self.y_param = y_param
self.configs = self._generate_configs(c, x_vals, y_vals)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, x_vals, y_vals):
configs = {x_val: dict.fromkeys(y_vals) for x_val in x_vals}
self.xy_combinations = list(itertools.product(x_vals, y_vals))
for x_val, y_val in self.xy_combinations:
_c = deepcopy(c)
setattr(_c, self.x_param, x_val)
setattr(_c, self.y_param, y_val)
configs[x_val][y_val] = _c
return configs
def _prepare_output_dict(self, cases, output_kpis):
output_dict = {}
for name in [case.name for case in cases]:
output_dict[name] = dict.fromkeys(output_kpis)
for kpi in output_kpis:
output_dict[name][kpi] = deepcopy(self.configs)
return output_dict
def _run_sensitivities(self, s, routine, output_kpis, output_dict):
for x_val, y_val in tqdm(self.xy_combinations):
_c = self.configs[x_val][y_val]
_s = deepcopy(s)
_s = routine(_c, _s)
for kpi in output_kpis:
for case in _s.cases:
output = getattr(case, kpi, np.nan)
output_dict[case.name][kpi][x_val][y_val] = output
del _s
del _c
gc.collect()
return output_dict
def show_matrix(self, case_name, kpi):
"""
Creates a DataFrame with chosen output kpi,
for each XY combination
"""
matrix = pd.DataFrame(self.kpis[case_name][kpi])
matrix.columns.name = self.x_param
matrix.index.name = self.y_param
return matrix
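# Usage sketch (hypothetical config c, CaseStudy container s and
# simulation routine; `routine(c, s)` must run the simulation and
# return s; the parameter names below are illustrative):
#
#     matrix = SensitivityMatrix(
#         c, s, routine,
#         x_param="capex", y_param="rt_eff",
#         x_vals=[1e6, 2e6], y_vals=[0.85, 0.90],
#         output_kpis=["electr_market_result"],
#     )
#     matrix.show_matrix("case A", "electr_market_result")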
class ScenarioAnalysis(SensitivityAnalysis):
def __init__(self, c, s, routine, params_dict, labels, output_kpis):
self.labels = labels
self.configs = self._generate_configs(c, params_dict, labels)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, params_dict, labels):
configs = {}
for i, label in enumerate(labels):
_c = deepcopy(c)
for param, values in params_dict.items():
setattr(_c, param, values[i])
configs[label] = _c
return configs
class TornadoChart:
"""
TODO: Absolute comparison instead of relative
"""
def __init__(self, c, s, routine, case, tornado_vars, output_kpis):
self.case = case
self.kpis = self._run_sensitivities(
c, s, routine, case, tornado_vars, output_kpis
)
def _run_sensitivities(self, c, s, routine, case, tornado_vars, output_kpis):
labels = ["Low", "Medium", "High"]
outputs = {kpi: pd.DataFrame(index=labels) for kpi in output_kpis}
for param, values in tornado_vars.items():
sens = SensitivityAnalysis(c, s, routine, param, values, output_kpis)
for kpi in output_kpis:
output = sens.single_kpi_overview(kpi, case_names=[case.name])[
case.name
]
output.index = labels
outputs[kpi][" ".join((param, str(values)))] = output
for kpi in output_kpis:
base_performance = deepcopy(outputs[kpi].loc["Medium", :])
for scen in labels:
scen_performance = outputs[kpi].loc[scen, :]
relative_performance = (scen_performance / base_performance - 1) * 100
outputs[kpi].loc[scen, :] = relative_performance
outputs[kpi] = outputs[kpi].round(1)
outputs[kpi].sort_values(by="Low", axis=1, ascending=False, inplace=True)
return outputs
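# Worked example of the relative step above: with a Medium (base) KPI
# of 100 and a Low-scenario KPI of 80, (80 / 100 - 1) * 100 = -20.0,
# i.e. the Low bar extends 20% below the base case.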
def show_chart(
self, kpi, dimensions=(800, 680), title="Tornado Chart", sort_by="Low"
):
outputs = self.kpis[kpi].sort_values(by=sort_by, axis=1, ascending=False)
traces = []
colors = {"Low": recoyred, "High": recoygreen}
for scenario in ["Low", "High"]:
trace = {
"type": "bar",
"x": outputs.loc[scenario, :].tolist(),
"y": outputs.columns,
"orientation": "h",
"name": scenario,
"marker": {"color": colors[scenario]},
}
traces.append(trace)
layout = {
"title": title,
"width": dimensions[0],
"height": dimensions[1],
"barmode": "relative",
"autosize": True,
"showlegend": True,
}
fig = Figure(data=traces, layout=layout)
fig.update_xaxes(
title_text=f"{kpi.upper()} % change compared to base scenario (Base {kpi.upper()} = {millify(getattr(self.case, kpi))})"
)
return fig

@ -0,0 +1,47 @@
from copy import deepcopy
from numbers import Number
import numpy as np
def num_formatting(val):
if np.isnan(val) or round(val, 0) == 0:
return "-"
else:
return f"{val:,.0f}"
def perc_formatting(val):
if np.isnan(val) or round(val, 0) == 0:
return "-"
else:
return f"{val:.1f}%"
def bc_formatting(val):
if not isinstance(val, Number):
return val
if np.isnan(val):
return ""
elif round(val, 2) == 0:
return "-"
else:
return f"{val:,.0f}"
def businesscase_formatter(df):
df_c = deepcopy(df)
spp = df_c.loc["Simple Payback Period", "Year 0"]
spp_str = "N/A" if np.isnan(spp) else str(spp) + " years"
df_c.loc["Simple Payback Period", "Year 0"] = spp_str
irr = df_c.loc["IRR (%)", "Year 0"]
if np.isnan(irr):
df_c.loc["IRR (%)", "Year 0"] = "N/A"
df_c = df_c.applymap(bc_formatting)
if not np.isnan(irr):
df_c.loc["IRR (%)", "Year 0"] += "%"
df_c.loc["WACC (%)", "Year 0"] += "%"
return df_c
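# Formatting sketch:
#
#     num_formatting(1234.56)   # -> "1,235"
#     num_formatting(0.4)       # -> "-"  (rounds to zero)
#     perc_formatting(12.34)    # -> "12.3%"
#     bc_formatting("label")    # -> "label"  (non-numbers pass through)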

Binary file not shown.

@ -0,0 +1,11 @@
from pyrecoy.prices import get_ttf_prices
import pandas as pd
data = get_ttf_prices(
start=pd.to_datetime("2019-01-01"),
end=pd.to_datetime("2019-12-31"),
ice_username="XXX",
ice_password="YYY",
)
print(data.head())

@ -0,0 +1 @@
from .database.Models.base import *

@ -0,0 +1,789 @@
import warnings
from functools import partial, lru_cache
from numbers import Number
from itertools import count
import numpy as np
from numpy.polynomial import Polynomial
from scipy.optimize import minimize_scalar
from .converters import *
class Asset:
"""Generic class for producing/consuming assets. Specific asset classes can
inherit from this class.
Parameters:
-----------
max_power : int/float
Maximum asset power in MW electric
min_power : int/float
Minimum asset load in MW electric
Usage:
------
Use the set_load and get_load methods to set and get asset status in MW.
Convention is negative values for inputs (consumption) and positive
values for outputs (production).
"""
_freq_to_multiplier = {"H": 1, "15T": (1 / 4), "1T": (1 / 60)}
_ids = count(0)
def __init__(self, name, max_power, min_power):
if min_power > max_power:
raise ValueError("'min_power' can not be larger than 'max_power'.")
self.name = name
self.id = next(self._ids)
self.max_power = max_power
self.min_power = min_power
self.modes = {"max": max_power, "min": min_power}
def __repr__(self):
return f"{self.__class__.__name__}(self, max_power={self.max_power}, min_power={self.min_power})"
def set_load(self, load):
"""Set Asset load in MW.
Convention is negative value for consumption and positive value
for production. Subclasses might use a different convention if
this seems more intuitive.
Returns the load that is set in MW.
"""
if load < self.min_power or load > self.max_power:
warnings.warn(
f"Chosen Asset load for {self.name} is out of range. "
f"Should be between {self.min_power} and {self.max_power}. "
f"Function will return boundary load level for now."
)
load = min(max(load, self.min_power), self.max_power)
return load
def set_mode(self, mode):
""" """
load = self.modes[mode]
return self.set_load(load)
def MW_to_MWh(self, MW):
"""Performs conversion from MW to MWh using the time_factor variable."""
return MW * self.time_factor
def MWh_to_MW(self, MWh):
"""Performs conversion from MWh to MW using the time_factor variable."""
return MWh / self.time_factor
def set_freq(self, freq):
"""
Function that aligns time frequency between Model and Asset.
Can be '1T', '15T' or 'H'
The time_factor variable is used in subclasses to perform MW to MWh conversions.
"""
self.freq = freq
self.time_factor = Asset._freq_to_multiplier[freq]
def set_financials(
self, capex, opex, devex, lifetime=None, depreciate=True, salvage_value=0
):
"""Set financial data of the asset."""
self.capex = capex
self.opex = opex
self.devex = devex
self.lifetime = lifetime
self.depreciate = depreciate
self.salvage_value = salvage_value
class Eboiler(Asset):
"""Subclass for an E-boiler."""
def __init__(self, name, max_power, min_power=0, efficiency=0.99):
super().__init__(name, min_power=-max_power, max_power=-min_power)
self.efficiency = efficiency
self.max_thermal_output = max_power * efficiency
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, max_power={self.max_power}, "
f"min_power={self.min_power}, efficiency={self.efficiency})"
)
def set_load(self, load):
"""Set load in MWe, returns (load, heat_output) in MWe and MWth
Convention is negative numbers for consumption.
Inserting a positive value will return an exception.
"""
if load > 0:
raise ValueError(
f"Eboiler.set_load() only accepts negative numbers by convention. "
f"{load} was inserted."
)
load = super().set_load(load)
heat_output = -load * self.efficiency
return (load, heat_output)
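# Sign-convention sketch: a 10 MWe boiler at full load consumes 10 MWe
# and produces 9.9 MWth at the default 99% efficiency:
#
#     boiler = Eboiler("eb1", max_power=10)
#     boiler.set_load(-10)   # -> (-10, 9.9)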
def set_heat_output(self, heat_output):
"""Set heat output in MWth, returns tuple (heat_output, eload) in MW"""
load = -heat_output / self.efficiency
load, heat_output = self.set_load(load)
return heat_output, load
class Heatpump(Asset):
"""Subclass for a Heatpump.
Use cop parameter to set fixed COP (float/int) or COP curve (func).
COP curve should take load in MWhe and return COP.
Parameters:
-----------
max_th_power : numeric
Maximum thermal output in MW (positive value)
cop_curve : numeric or list or function
3 ways to set the COP of the Heatpump:
(1) Fixed COP based on [numeric] value.
(2) Polynomial with coefficients based on [list] input.
Input coefficients in format [c0, c1, c2, ..., c(n)],
will generate Polynomial p(x) = c0 + c1*x + c2*x^2 ... cn*x^n,
where x = % thermal load (in % of thermal capacity) as decimal value.
Example:
cop_curve=[1, 2, 3, 4] will result in the following COP curve:
p(x) = 1 + 2x + 3x**2 + 4x**3,
(3) [function] in format func(*args, **kwargs)
Function should return a Polynomial that takes 'load_perc' as parameter.
min_th_power : numeric
Minimum thermal output in MW (positive value)
Notes:
------
Sign convention:
Thermal power outputs have positive values
Electric power inputs have negative values
"""
def __init__(
self,
name,
max_th_power,
cop_curve,
min_th_power=0,
):
if max_th_power < 0 or min_th_power < 0:
raise ValueError("Thermal power can not have negative values.")
if min_th_power > max_th_power:
raise ValueError("'min_th_power' can not be larger than 'max_th_power'.")
self.name = name
self.max_th_power = max_th_power
self.min_th_power = min_th_power
self.cop_curve = self._set_cop_curve(cop_curve)
def __repr__(self):
return (
f"{self.__class__.__name__}(name='{self.name}', max_thermal_power={self.max_th_power}, "
f"cop_curve={self.cop_curve}, min_th_power={self.min_th_power})"
)
# Is turning everything into a Polynomial the best solution here?
# No lru_cache here: list inputs are unhashable and would raise a TypeError.
@staticmethod
def _set_cop_curve(cop_curve):
"""Generate COP curve function based on different inputtypes.
Returns a function that takes *args **kwargs and returns a Polynomial.
"""
if isinstance(cop_curve, list):
def func(*args, **kwargs):
return Polynomial(cop_curve)
return func
return cop_curve
@lru_cache(maxsize=None)
def get_cop(self, heat_output, Tsink=None, Tsource=None):
"""Get COP corresponding to certain load.
Parameters:
-----------
heat_output : numeric
Thermal load in MW
Tsink : numeric
Sink temperature in degrees Celsius
Tsource : numeric
Source temperature in degrees Celsius
Notes:
------
Sign convention:
Positive values for thermal load
Negative values for electric load
"""
load_perc = heat_output / self.max_th_power
cop_curve = self.cop_curve
if not callable(cop_curve):
return cop_curve
else:
return cop_curve(Tsink=Tsink, Tsource=Tsource)(load_perc)
def th_to_el_power(self, heat_output, Tsink=None, Tsource=None):
if not self.min_th_power <= heat_output <= self.max_th_power:
warnings.warn(
f"Chosen heat output is out of range [{self.min_th_power} - {self.max_th_power}]. "
"Heat output is being limited to the closest boundary."
)
heat_output = min(max(heat_output, self.min_th_power), self.max_th_power)
cop = self.get_cop(heat_output=heat_output, Tsink=Tsink, Tsource=Tsource)
return -heat_output / cop
def set_load(self, *args, **kwargs):
raise NotImplementedError(
"Directly setting the electric load of the heatpump is not possible (yet). "
"Functionality will be implemented if there is a specific usecase for it."
)
@lru_cache(maxsize=None)
def set_heat_output(self, heat_output, Tsink=None, Tsource=None):
"""Set heat output in MWth, returns load of heatpump as tuple (MWe, MWth)"""
if not self.min_th_power <= heat_output <= self.max_th_power:
warnings.warn(
f"Chosen heat output is out of range [{self.min_th_power} - {self.max_th_power}]. "
"Heat output is being limited to the closest boundary."
)
heat_output = min(max(heat_output, self.min_th_power), self.max_th_power)
if Tsink is not None and Tsource is not None and Tsink <= Tsource:
raise ValueError(f"Tsource '{Tsource}' must be lower than Tsink '{Tsink}'.")
cop = self.get_cop(heat_output=heat_output, Tsink=Tsink, Tsource=Tsource)
e_load = -heat_output / cop
return e_load, heat_output
def _cost_function(self, x, c1, c2, c3, Tsink=None, Tsource=None):
"""Objective function for set_opt_load function.
x = heatpump thermal load in MW
c1 = electricity_cost
c2 = alt_heat_price
c3 = demand
"""
return (
x / self.get_cop(heat_output=x, Tsink=Tsink, Tsource=Tsource) * c1
+ (c3 - x) * c2
)
@lru_cache(maxsize=None)
def set_opt_load(
self,
electricity_cost,
alt_heat_price,
demand,
Tsink=None,
Tsource=None,
tolerance=0.01,
):
"""Set optimal load of Heatpump with minimal total heat costs.
Function uses scipy.optimize.minimize_scalar to minimize the cost function.
Parameters:
-----------
electricity_cost:
Cost of input electricity in €/MWh(e)
alt_heat_price:
Price of heat from alternative source in €/MWh(th)
demand:
Heat demand in MW(th)
Returns:
--------
Optimal load of heatpump as tuple (MWe, MWth)
"""
c1 = electricity_cost
c2 = alt_heat_price
c3 = demand
cop_curve = self.cop_curve
if isinstance(cop_curve, Number):
# With a fixed COP the optimum is bang-bang: full output when
# heatpump heat is cheaper than the alternative, else minimum.
th_load = self.max_th_power if c1 / cop_curve <= c2 else self.min_th_power
return self.set_heat_output(th_load, Tsink=Tsink, Tsource=Tsource)
obj_func = partial(
self._cost_function, c1=c1, c2=c2, c3=c3, Tsink=Tsink, Tsource=Tsource
)
low_bound = 0
up_bound = min(c3, self.max_th_power)
opt_th_load = minimize_scalar(
obj_func,
bounds=(low_bound, up_bound),
method="bounded",
options={"xatol": tolerance},
).x
opt_e_load, opt_th_load = self.set_heat_output(
opt_th_load, Tsink=Tsink, Tsource=Tsource
)
return opt_e_load, opt_th_load
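# COP-curve sketch (hypothetical coefficients): cop_curve=[2.0, 3.0]
# builds p(x) = 2 + 3x with x the thermal load fraction, so at 50% load
# COP = 3.5 and a 2.5 MWth output draws 2.5 / 3.5 = ~0.714 MWe:
#
#     hp = Heatpump("hp1", max_th_power=5, cop_curve=[2.0, 3.0])
#     hp.set_heat_output(2.5)   # -> (~-0.714, 2.5)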
class Battery(Asset):
"""Subclass for a Battery.
Battery is modeled as follows:
- Rated power is power in MW that battery can
import from and export to the grid
- Efficiency loss is applied at charging, meaning that
SoC increase when charging is lower than the SoC decrease
when discharging
"""
def __init__(
self,
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc=0,
max_soc=1,
soc_at_start=None,
cycle_lifetime=None,
):
super().__init__(name=name, max_power=rated_power, min_power=-rated_power)
self.capacity = rated_capacity
self.min_soc = min_soc
self.max_soc = max_soc
self.min_chargelevel = min_soc * self.capacity
self.max_chargelevel = max_soc * self.capacity
self.rt_eff = roundtrip_eff
self.one_way_eff = np.sqrt(roundtrip_eff)
self.cycle_count = 0
self.cycle_lifetime = cycle_lifetime
soc_at_start = min_soc if soc_at_start is None else soc_at_start
self.set_chargelevel(soc_at_start * self.capacity)
def __repr__(self):
return (
f"Battery(self, rated_power={self.max_power}, rated_capacity={self.capacity}, "
f"roundtrip_eff={self.rt_eff}, min_soc={self.min_soc}, max_soc={self.max_soc})"
)
def get_soc(self):
"""Get the SoC in % (decimal value)"""
return self.chargelevel / self.capacity
def set_chargelevel(self, chargelevel):
"""Set the chargelevel in MWh. Will automatically change the SoC accordingly."""
# if round(chargelevel,2) < round(self.min_chargelevel,2) or round(chargelevel,2) > round(self.max_chargelevel,2):
# raise ValueError(
# f"Tried to set Charge Level to {chargelevel}. "
# f"Charge Level must be a value between "
# f"{self.min_chargelevel} and {self.max_chargelevel} (in MWh)"
# )
self.chargelevel = chargelevel
def set_load(self, load):
"""Set load of the battery.
Use negative values for charging and positive values for discharging.
Returns actual chargespeed, considering technical limitations of the battery.
Note: We currently assume all efficiency losses occur during charging (no losses during discharge)
"""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = super().set_load(load)
unbound_charging = self.MW_to_MWh(load)
if load < 0:
unbound_charging *= self.rt_eff
chargelevel = self.chargelevel
max_charging = chargelevel - self.max_chargelevel
max_discharging = chargelevel - self.min_chargelevel
bound_charging = min(max(unbound_charging, max_charging), max_discharging)
newcl = chargelevel - bound_charging
self.set_chargelevel(newcl)
if bound_charging < 0:
bound_charging /= self.rt_eff
self.cycle_count += abs(bound_charging / (self.capacity * 2))
return self.MWh_to_MW(bound_charging)
def charge(self, chargespeed):
"""Charge the battery with given chargespeed.
Redirects to Battery.set_load().
Returns load (negative value for charging).
"""
chargespeed = self.max_power if chargespeed == "max" else chargespeed
if chargespeed < 0:
raise ValueError(
f"Chargespeed should be always be a positive value by convention. "
f"Inserted {chargespeed}."
)
chargespeed = self.set_load(-chargespeed)
return chargespeed
def discharge(self, dischargespeed):
"""Discharge the battery by given amount.
Redirects to Battery.set_load().
Returns load (positive value for discharging).
"""
dischargespeed = self.max_power if dischargespeed == "max" else dischargespeed
if dischargespeed < 0:
raise ValueError(
f"Dischargespeed should be always be a positive value by convention. "
f"Inserted {dischargespeed}."
)
dischargespeed = self.set_load(dischargespeed)
return dischargespeed
def get_cost_per_cycle(self):
return self.capex / self.cycle_lifetime
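# Round-trip sketch: a 1 MW / 2 MWh battery at 90% round-trip
# efficiency, with all losses booked at charging:
#
#     batt = Battery("b1", rated_power=1, rated_capacity=2, roundtrip_eff=0.9)
#     batt.set_freq("H")
#     batt.charge(1)     # -> -1.0 on the grid side; charge level rises 0.9 MWh
#     batt.get_soc()     # -> 0.45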
class EV(Battery):
def __init__(
self,
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc=0,
max_soc=1,
soc_at_start=None,
id=None,
):
super().__init__(
name,
rated_power,
rated_capacity,
roundtrip_eff,
min_soc,
max_soc,
soc_at_start,
)
if id:
self.id = id
class HotWaterStorage(Battery):
"""Subclass for a storage asset.
Parameters:
-----------
rated_capacity : int/float
Rated capacity in MWh
min_buffer_level_perc : float
Minimum buffer level in %
buffer_level_at_start : float
Buffer level at start in %
"""
def __init__(
self,
name,
rated_power,
capacity_per_volume,
volume,
temperature,
min_storagelevel,
initial_storagelevel=None,
):
rated_capacity = capacity_per_volume * volume
if not initial_storagelevel:
initial_storagelevel = min_storagelevel
soc_at_start = initial_storagelevel / rated_capacity
max_storagelevel = rated_capacity * 0.95
min_soc = min_storagelevel / rated_capacity
max_soc = max_storagelevel / rated_capacity
self.temperature = temperature
super().__init__(
name=name,
rated_power=rated_power,
rated_capacity=rated_capacity,
roundtrip_eff=1,
min_soc=min_soc,
max_soc=max_soc,
soc_at_start=soc_at_start,
)
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, rated_power={self.max_power}, capacity={self.capacity}, "
f"temperature={self.temperature}, min_storagelevel={self.min_chargelevel})"
)
@property
def charging_power_limit(self):
max_charging_energy = self.max_chargelevel - self.chargelevel
return min(self.MWh_to_MW(max_charging_energy), -self.min_power)
@property
def discharging_power_limit(self):
max_discharging_energy = self.chargelevel - self.min_chargelevel
return min(self.MWh_to_MW(max_discharging_energy), self.max_power)
class GasBoiler(Asset):
"""Representation of a Gas-fired boiler.
name : str
Unique name of the asset
max_th_output : numeric
Maximum thermal output in MW thermal
efficiency : float
Thermal efficiency of the gasboiler as decimal value.
min_th_output : numeric
Minimum thermal output in MW thermal
"""
def __init__(
self,
name,
max_th_output,
min_th_output=0,
efficiency=0.9,
):
super().__init__(name=name, max_power=max_th_output, min_power=min_th_output)
self.efficiency = efficiency
def __repr__(self):
return (
f"{self.__class__.__name__}(name={self.name}, max_power={self.max_power}, "
f"min_power={self.min_power}, efficiency={self.efficiency})"
)
def set_load(self, *args, **kwargs):
raise NotImplementedError(
"Gasboiler does not have electric load. "
"Use Gasboiler.set_heat_output() instead."
)
@lru_cache(maxsize=None)
def set_heat_output(self, output):
"""Redirect to Gasboiler.set_load()"""
heat_output = super().set_load(output)
gas_input = -heat_output / self.efficiency
return heat_output, gas_input
class Electrolyser(Asset):
def __init__(
self,
name,
rated_power,
kwh_per_kg=60,
min_flex_load_in_perc=15,
):
min_flex_power = min_flex_load_in_perc / 100 * rated_power
super().__init__(name=name, max_power=-min_flex_power, min_power=-rated_power)
self.rated_power = rated_power
self.min_flex_load = min_flex_load_in_perc
self.min_flex_power = self.min_flex_load / 100 * self.rated_power
self.kwh_per_kg = kwh_per_kg
self.kg_per_MWh = 1000 / self.kwh_per_kg
def __repr__(self):
return (
f"Electrolyser(name={self.name}, rated_power={self.rated_power}, "
f"kwh_per_kg={self.kwh_per_kg}, flex_range_in_perc=[{self.min_flex_load}, "
f"{self.max_flex_load}])"
)
def set_load(self, load):
"""Set load of the Electrolyser in MW."""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = -abs(load)
load = super().set_load(load)
h2_output_kg = self.MW_to_MWh(-load) * self.kg_per_MWh
return load, h2_output_kg
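# Production sketch: at the default 60 kWh/kg, one hour at 1 MW yields
# 1 MWh * 1000 / 60 = ~16.7 kg of hydrogen:
#
#     el = Electrolyser("el1", rated_power=2)
#     el.set_freq("H")
#     el.set_load(1)     # -> (-1, ~16.67)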
class Battolyser(Asset):
def __init__(
self,
name,
rated_power,
rated_capacity,
rt_eff,
soc_at_start=None,
):
super().__init__(name=name, max_power=rated_power, min_power=-rated_power)
self.capacity = rated_capacity
self.min_soc = 0.05
self.max_soc = 1.00
self.min_chargelevel = self.min_soc * self.capacity
self.max_chargelevel = self.max_soc * self.capacity
self.rt_eff = rt_eff
self.cycle_count = 0
soc_at_start = self.min_soc if soc_at_start is None else soc_at_start
self.set_chargelevel(soc_at_start * self.capacity)
def __repr__(self):
return (
f"Battolyser(name={self.name}, rated_power={self.max_power}, "
f"rated_capacity={self.capacity}, rt_eff={self.rt_eff})"
)
def get_soc(self):
"""Get the SoC in % (decimal value)"""
return self.chargelevel / self.capacity
def set_chargelevel(self, chargelevel):
"""Set the chargelevel in MWh. Will automatically change the SoC accordingly."""
if chargelevel < self.min_chargelevel or chargelevel > self.max_chargelevel:
raise ValueError(
f"Tried to set Charge Level to {chargelevel}. "
f"Charge Level must be a value between "
f"{self.min_chargelevel} and {self.max_chargelevel} (in MWh)"
)
self.chargelevel = chargelevel
def set_load(self, load):
"""Set load of the Battolyser in MW.
Use negative values for charging and positive values for discharging.
Returns actual chargespeed, considering technical limitations of the battery.
Note: We currently assume all efficiency losses occur during discharging
(no losses during charging)
"""
if not hasattr(self, "freq"):
raise AttributeError(
"Time frequency of the model is not defined. "
"Assign asset to a CaseStudy or use Asset.freq(). "
"to set de time frequency and try again."
)
load = super().set_load(load)
unbound_charging = self.MW_to_MWh(load)
if load > 0:
unbound_charging /= self.rt_eff
chargelevel = self.chargelevel
max_charging = chargelevel - self.max_chargelevel
max_discharging = chargelevel - self.min_chargelevel
bound_charging = min(max(unbound_charging, max_charging), max_discharging)
newcl = chargelevel - bound_charging
self.set_chargelevel(newcl)
if bound_charging > 0:
bound_charging *= self.rt_eff
charging_power = self.MWh_to_MW(bound_charging)
h2_power = -self.MWh_to_MW(max(bound_charging - unbound_charging, 0))
self.cycle_count += abs(bound_charging / (self.capacity * 2))
return charging_power, h2_power
def charge(self, chargespeed):
"""Charge the battery with given chargespeed.
Redirects to Battery.set_load().
Returns load (negative value for charging).
"""
chargespeed = self.max_power if chargespeed == "max" else chargespeed
if chargespeed < 0:
raise ValueError(
f"Chargespeed should be always be a positive value by convention. "
f"Inserted {chargespeed}."
)
chargespeed, h2_prod_in_MW = self.set_load(-chargespeed)
return chargespeed, h2_prod_in_MW
def discharge(self, dischargespeed):
"""Discharge the battery by given amount.
Redirects to Battery.set_load().
Returns load (positive value for discharging).
"""
dischargespeed = self.max_power if dischargespeed == "max" else dischargespeed
if dischargespeed < 0:
raise ValueError(
f"Dischargespeed should be always be a positive value by convention. "
f"Inserted {dischargespeed}."
)
dischargespeed = self.set_load(dischargespeed)[0]
return dischargespeed
# Added by Shahla; very similar to HotWaterStorage.
class HeatBuffer(Battery):
"""Subclass for a storage asset.
Parameters:
-----------
rated_capacity : int/float
Rated capacity in MWh
min_buffer_level_perc : float
Minimum buffer level in %
buffer_level_at_start : float
Buffer level at start in %
"""
def __init__(
self, name, rated_capacity, min_buffer_level_perc, buffer_level_at_start
):
super().__init__(
name=name,
rated_power=100,
rated_capacity=rated_capacity,
roundtrip_eff=1,
min_soc=min_buffer_level_perc,
max_soc=1,
soc_at_start=buffer_level_at_start,
)

@ -0,0 +1,11 @@
import os
from pathlib import Path
if os.environ.get("USERNAME") == "mekre":
BASEPATH = Path("C:\\Users\\mekre\\")
elif os.environ.get("USERNAME") == "christiaan.buitelaar":
BASEPATH = Path("C:\\Users\\christiaan.buitelaar\\Documents\\GitHub\\asset-case-studies\\")
elif os.environ.get("USERNAME") == "karel.van.doesburg":
BASEPATH = Path("C:\\Users\\karel.van.doesburg\\Documents\\asset_studies_recoy\\")
elif os.environ.get("USERNAME") == "shahla.huseynova":
BASEPATH = Path("C:\\Users\\shahla.huseynova\\Documents\\asset_studies_recoy\\asset-case-studies\\")

@ -0,0 +1,548 @@
import calendar
import warnings
from copy import deepcopy
import numpy as np
import pandas as pd
from .framework import TimeFramework
from .financial import (
calc_business_case,
calc_co2_costs,
calc_electr_market_results,
calc_grid_costs,
calculate_eb_ode,
)
from .forecasts import Mipf, Qipf
from .prices import get_ets_prices, get_ets_prices_excel, get_ttf_prices
from .converters import EURpertonCO2_to_EURperMWh
class CaseStudy:
"""
Representation of a casestudy
"""
instances = {}
def __init__(self, time_fw: TimeFramework, freq, name, data=None, forecast=None):
self.name = name
self.modelled_time_period_years = time_fw.modelled_time_period_years
self.start = time_fw.start
self.end = time_fw.end
self.freq = freq
self.dt_index = time_fw.dt_index(freq)
self.data = pd.DataFrame(index=self.dt_index)
self.assets = {}
self.cashflows = {}
self.irregular_cashflows = {}
self.capex = {}
self.total_capex = 0
self.kpis = {}
amount_of_days_in_year = 366 if calendar.isleap(self.start.year) else 365
self.year_case_duration = (self.end - self.start).total_seconds() / (
3600 * 24 * amount_of_days_in_year
)
self.days_case_duration = self.year_case_duration * amount_of_days_in_year
self.hours_case_duration = self.days_case_duration * 24
self.quarters_case_duration = self.days_case_duration * 24 * 4
self.minutes_case_duration = self.days_case_duration * 24 * 60
# self.year_case_duration = 1
if data is not None:
if len(data) != len(self.data):
raise ValueError(
"Length of data is not same as length of CaseStudy.data"
)
data.index = self.dt_index
self.data = pd.concat([self.data, data], axis=1)
if forecast is not None:
self.add_forecast(forecast, freq)
CaseStudy.instances[self.name] = self
@classmethod
def list_instances(cls):
"""
Returns a list with all CaseStudy instances.
Useful if you want to iterate over all instances
or use them as input to a function.
"""
return list(cls.instances.values())
def add_forecast(self, forecast, freq):
"""
Add forecast and price data to the data table of the CaseStudy instance.
"""
# TODO Add error handling for frequencies
if forecast == "mipf" and freq == "1T":
forecast_data = Mipf(
start=self.start, end=self.end, tidy=True, include_nextQ=False
).data
elif forecast == "mipf" and freq == "15T":
forecast_data = Mipf(
start=self.start, end=self.end, tidy=False, include_nextQ=False
).data
elif forecast == "qipf":
forecast_data = Qipf(start=self.start, end=self.end, freq=self.freq).data
else:
raise ValueError("Forecast does not exist. Use 'mipf' or 'qipf'.")
self.data = pd.concat([self.data, forecast_data], axis=1)
def add_gasprices(self):
"""
Add gas price data (TTF day-ahead) to the data table of the CaseStudy instance.
"""
self.data["Gas prices (€/MWh)"] = get_ttf_prices(
start=self.start, end=self.end, freq=self.freq
)["Gas prices (€/MWh)"]
def add_co2prices(self, perMWh=False):
"""
Add CO2 prices (ETS) data to the data table of the CaseStudy instance.
"""
self.data["CO2 prices (€/ton)"] = get_ets_prices(
start=self.start, end=self.end, freq=self.freq
)["CO2 prices (€/MWh)"]
if perMWh:
self.data["CO2 prices (€/MWh)"] = EURpertonCO2_to_EURperMWh(
self.data["CO2 prices (€/ton)"]
).round(2)
def add_co2prices_excel(self, perMWh=False):
"""
Add CO2 prices (ETS) data to the data table of the CaseStudy instance.
"""
self.data["CO2 prices (€/ton)"] = get_ets_prices_excel(
start=self.start, end=self.end, freq=self.freq
)["CO2 prices (€/ton)"]
if perMWh:
self.data["CO2 prices (€/MWh)"] = EURpertonCO2_to_EURperMWh(
self.data["CO2 prices (€/ton)"]
).round(2)
def add_asset(self, asset):
"""Assign an Asset instance to CaseStudy instance.
Method will create a unique copy of the Asset instance.
If Asset contains financial information,
cashflows are automatically updated.
"""
assetcopy = deepcopy(asset)
assetcopy.set_freq(self.freq)
self.assets[assetcopy.name] = assetcopy
if hasattr(assetcopy, "opex"):
self.add_cashflow(f"{assetcopy.name} OPEX (€)", -assetcopy.opex)
if hasattr(assetcopy, "capex"):
self.add_capex(f"{assetcopy.name} CAPEX (€)", -assetcopy.capex)
if hasattr(assetcopy, "devex"):
self.add_capex(f"{assetcopy.name} DEVEX (€)", -assetcopy.devex)
def get_assets(self):
"""Returns all Asset instances assigned to CaseStudy instance."""
return list(self.assets.values())
def add_cashflow(self, label, amount):
"""Add a yearly cashflow to the CaseStudy
Convention is negative values for costs and positive values for revenue.
"""
self.cashflows[label] = round(amount, 2)
def add_capex(self, label, amount):
"""Add a capex component to the CaseStudy
Convention is to use positive values
"""
capex = round(amount, 2) * -1
self.capex[label] = capex
self.total_capex += capex
def add_irregular_cashflow(self, amount, year):
base = self.irregular_cashflows[year] if year in self.irregular_cashflows else 0
self.irregular_cashflows[year] = base + amount
def generate_electr_market_results(self, real_col, nom_col=None):
"""Generates a dictionary with results of the simulation on energy market.
Dictionary is saved in CaseStudy.electr_market_results.
Total market result is automatically added to cashflow dictionary.
"""
if nom_col is None:
nom_col = "Nom. vol."
self.data[nom_col] = 0
data = calc_electr_market_results(self.data, nom_col=nom_col, real_col=real_col)
self.data = data
total_produced = data["Prod. vol."].sum()
total_consumed = -data["Cons. vol."].sum()
self.total_electricity_cons = total_consumed * (-1)
selling = data[real_col] > 0
mean_selling_price = (
data["Combined Result"].where(selling).sum() / total_produced
if total_produced != 0
else 0
)
mean_buying_price = (
data["Combined Result"].where(~selling).sum() / total_consumed * (-1)
if round(total_consumed, 2) != 0
else 0
)
total_comb_result = data["Combined Result"].sum()
self.electr_market_results = {
"Total net volume (MWh)": data[real_col].sum(),
"Total exported to grid (MWh)": total_produced,
"Total consumed from grid (MWh)": total_consumed,
"Total nominated volume (MWh)": data[nom_col].sum(),
"Absolute imbalance volume (MWh)": data["Imb. vol."].abs().sum(),
"Mean selling price (€/MWh)": mean_selling_price,
"Mean buying price (€/MWh)": mean_buying_price,
"Total day-ahead result (€)": data["Day-Ahead Result"].sum(),
"Total POS result (€)": data["POS Result"].sum(),
"Total NEG result (€)": data["NEG Result"].sum(),
"Total imbalance result (€)": data["Imbalance Result"].sum(),
"Total combined result (€)": total_comb_result,
}
self.electr_market_result = total_comb_result
self.add_cashflow("Result on electricity market (€)", total_comb_result)
def add_gas_costs(self, gasvolumes_col, gasprice_col="Gas prices (€/MWh)"):
"""Calculate gas costs and add to cashflows
Parameters:
-----------
        gasvolumes_col : str
            Column containing gas volumes in the CaseStudy.data dataframe
        gasprice_col : str
            Column containing gas prices in the CaseStudy.data dataframe
"""
gasprices = self.data[gasprice_col]
gasvolumes = self.data[gasvolumes_col].abs()
gas_costs = gasprices * gasvolumes * -1
self.data["Gas commodity costs (€)"] = gas_costs
self.total_gas_cons = gasvolumes.sum()
self.total_gas_costs = round(gas_costs.sum(), 2)
self.add_cashflow("Gas consumption costs (€)", self.total_gas_costs)
def add_co2_costs(
self, volume_cols, co2_price_col="CO2 prices (€/ton)", fuel="gas"
):
"""Calculate co2 costs and add to cashflows
Parameters:
-----------
        volume_cols : str or list
            Column name(s) containing fuel volumes in the CaseStudy.data dataframe
        co2_price_col : str
            Column containing CO2 prices in the CaseStudy.data dataframe
        fuel : str
            Fuel type used to select the emission factor, e.g. 'gas'
"""
if isinstance(volume_cols, str):
volume_cols = [volume_cols]
co2_prices = self.data[co2_price_col]
volumes = [self.data[col] for col in volume_cols]
self.total_co2_costs = calc_co2_costs(
co2_prices=co2_prices, volumes=volumes, fuel=fuel
)
self.add_cashflow("CO2 emission costs (€)", self.total_co2_costs)
def add_eb_ode(
self,
commodity,
cons_col=None,
tax_bracket=None,
base_cons=None,
horti=False,
m3=False,
add_cons_MWh=0,
year=2020,
split=False,
):
"""Add EB & ODE to cashflows
See financial.calc_eb_ode() for more detailed documentation.
Parameters:
-----------
commodity : str
{'gas', 'electricity'}
cons_col : str
            Optional parameter to specify the column name of the
consumption values in MWh.
tax_bracket : numeric
Tax bracket that the client is in [1-4]
            Use either 'tax_bracket' or 'base_cons', not both.
base_cons : numeric
Base consumption volume of the client
            Use either 'tax_bracket' or 'base_cons', not both.
horti : bool
Set to True to use horticulture rates
m3 : bool
Set to True if you want to enter gas volumes in m3
        add_cons_MWh : numeric
            Enables manually adding extra consumption (in MWh)
        year : int
            Tax year to use, e.g. 2020
        split : bool
            Set to True to add EB and ODE as separate cashflows
"""
if cons_col:
cons = self.data[cons_col].abs().sum()
else:
cons = getattr(self, f"total_{commodity}_cons")
cons = cons + add_cons_MWh
eb, ode = calculate_eb_ode(
cons=cons,
electr=(commodity == "electricity"),
tax_bracket=tax_bracket,
base_cons=base_cons,
horti=horti,
m3=m3,
year=year,
)
if split:
self.add_cashflow(f"EB {commodity.capitalize()} (€)", eb)
self.add_cashflow(f"ODE {commodity.capitalize()} (€)", ode)
else:
self.add_cashflow(f"{commodity.capitalize()} taxes (€)", eb + ode)
def add_grid_costs(
self,
power_MW_col,
grid_operator,
year,
connection_type,
cons_MWh_col=None,
kw_contract_kW=None,
path=None,
add_peak_kW=0,
add_cons_MWh=0,
):
"""Add variable grid transport costs to cashflows
See financial.calc_grid_costs() for more detailed documentation.
Parameters:
-----------
power_MW_col : str
Column in data table with power usage in MW
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year, e.g. 2020
connection_type : str
Connection type, e.g. 'HS'
cons_MWh_col : str
Column in data table containing grid consumption in MWh
kw_contract_kW : numeric
in kW. If provided, function will assume fixed value kW contract
path : str
Specify path with grid tariff files. Leave empty to use default path.
        add_peak_kW : float
            Enables manually adding peak consumption to the data
        add_cons_MWh : float
            Enables manually adding extra grid consumption (in MWh)
"""
cols = [power_MW_col]
if cons_MWh_col is not None:
cols.append(cons_MWh_col)
peaks_kW = (
(self.data[power_MW_col] * 1000 - add_peak_kW)
.resample("15T")
.mean()
.abs()
.resample("M")
.max()
.to_list()
)
        cons_kWh = (
            self.data[cons_MWh_col].sum() * 1000 if cons_MWh_col is not None else 0
        ) + add_cons_MWh * 1000  # add_cons_MWh is given in MWh; convert to kWh
self.grid_costs = calc_grid_costs(
peakload_kW=peaks_kW,
grid_operator=grid_operator,
year=year,
connection_type=connection_type,
kw_contract_kW=kw_contract_kW,
totalcons_kWh=cons_kWh,
path=path,
modelled_time_period_years=self.modelled_time_period_years,
)
total_grid_costs = sum(self.grid_costs.values())
self.add_cashflow("Grid transport costs (€)", total_grid_costs)
def calculate_ebitda(self, project_duration, residual_value=None):
"""Calculate yearly EBITDA based on cashflows
Calculation table and EBITDA value are saved in CaseStudy.
"""
for key, val in self.cashflows.items():
if np.isnan(val):
warnings.warn(
f"Cashflow '{key}' for CaseStudy '{self.name}' contains NaN value. "
"Something might have gone wrong. Replacing NaN with 0 for now."
)
self.cashflows[key] = 0
assets = self.get_assets()
for asset in assets:
if not asset.depreciate:
pass
elif asset.lifetime is None:
raise ValueError(f"'lifetime' property of {asset.name} was not set.")
elif project_duration > asset.lifetime:
warnings.warn(
f"Project duration is larger than technical lifetime of asset '{asset.name}'. "
"Will continue by limiting project duration to the technical lifetime of the asset."
)
project_duration = int(asset.lifetime)
depreciations, residual_value = CaseStudy._calc_depr_and_residual_val(
assets, self.total_capex, residual_value, project_duration
)
self.ebitda = sum(self.cashflows.values())
self.ebitda_calc = deepcopy(self.cashflows)
self.ebitda_calc["EBITDA (€)"] = self.ebitda
self.ebitda_calc["Depreciation (€)"] = depreciations * -1
self.ebitda_calc["EBITDA + depr (€)"] = self.ebitda + depreciations * -1
def calculate_business_case(
self,
project_duration,
discount_rate,
residual_value=None,
baseline=None,
bl_res_value=None,
eia=False,
vamil=False,
fixed_income_tax=False,
):
"""Calculates business case (NPV, IRR) for the CaseStudy.
Business case calculation is stored in CaseStudy.business_case
NPV is stored in CaseStudy.npv
        IRR is stored in CaseStudy.irr
Parameters:
-----------
project_duration : int
In years
discount_rate : float
In % (decimal value)
residual_value : numeric
Can be used to manually set residual value of assets (all assets combined).
Defaults to None, in which case residual_value is calculated
based on linear depreciation over technical lifetime.
baseline : CaseStudy
Baseline to compare against
bl_res_value : numeric
Similar to 'residual_value' for baseline
eia : bool
Apply EIA ("Energie Investerings Aftrek") tax discounts.
Defaults to False.
        vamil : bool
            Apply VAMIL ("Willekeurige afschrijving milieu-investeringen") tax discounts.
            Defaults to False.
        fixed_income_tax : bool
            Apply a flat 25% income tax rate instead of the bracketed 2021 rates.
            Defaults to False.
"""
assets = self.get_assets()
for asset in assets:
if not asset.depreciate:
pass
elif asset.lifetime is None:
raise ValueError(f"'lifetime' property of {asset.name} was not set.")
elif project_duration > asset.lifetime:
warnings.warn(
f"Project duration is larger than technical lifetime of asset '{asset.name}'. "
"Will continue by limiting project duration to the technical lifetime of the asset."
)
project_duration = int(asset.lifetime)
capex = self.total_capex
yearly_ebitda = self.ebitda / self.modelled_time_period_years
irregular_cashflows = (
self._calc_irregular_cashflows(project_duration, baseline=baseline)
if self.irregular_cashflows
else 0
)
depreciations, residual_value = CaseStudy._calc_depr_and_residual_val(
assets, capex, residual_value, project_duration
)
if baseline is not None:
bl_assets = baseline.assets.values()
bl_capex = baseline.total_capex
bl_depr, bl_res_val = CaseStudy._calc_depr_and_residual_val(
bl_assets, bl_capex, bl_res_value, project_duration
)
capex -= bl_capex
depreciations -= bl_depr
residual_value -= bl_res_val
yearly_ebitda -= baseline.ebitda / self.modelled_time_period_years
self.business_case = calc_business_case(
capex=capex,
discount_rate=discount_rate,
project_duration=project_duration,
depreciation=depreciations,
residual_value=residual_value,
regular_earnings=yearly_ebitda,
irregular_cashflows=irregular_cashflows,
eia=eia,
vamil=vamil,
fixed_income_tax=fixed_income_tax,
)
self.irr = self.business_case.loc["IRR (%)", "Year 0"] / 100
self.npv = self.business_case.loc["NPV (€)", "Year 0"]
self.spp = self.business_case.loc["Simple Payback Period", "Year 0"]
@staticmethod
def _calc_depr_and_residual_val(assets, capex, residual_value, project_duration):
if residual_value is None:
assets = [asset for asset in assets if asset.depreciate]
depreciations = sum(
(asset.capex - asset.salvage_value) / asset.lifetime for asset in assets
)
residual_value = capex - depreciations * project_duration
else:
depreciations = (capex - residual_value) / project_duration
return depreciations, residual_value
def _calc_irregular_cashflows(self, project_duration, baseline=None):
irr_earnings = [0] * (project_duration)
for year, cashflow in self.irregular_cashflows.items():
if baseline:
cashflow -= baseline.irregular_cashflows.get(year, 0)
irr_earnings[int(year) - 1] = cashflow
return irr_earnings

@ -0,0 +1,43 @@
recoy_colordict = {
"RecoyDarkBlue": "#0e293b",
"RecoyBlue": "#1f8376",
"RecoyRed": "#dd433b",
"RecoyYellow": "#f3d268",
"RecoyGreen": "#46a579",
"RecoyPurple": "#6d526b",
"RecoyOrange": "#f2a541",
"RecoyBlueGrey": "#145561",
"RecoyDarkGrey": "#2a2a2a",
"RecoyLilac": "#C3ACCE",
"RecoyBrown": "#825E52",
"RecoyLightGreen": "#7E9181",
"RecoyCitron": "#CFD186",
"RecoyPink": "#F5B3B3"
}
recoy_greysdict = {
"RecoyLightGrey": "#e6e6e6",
"RecoyGrey": "#c0c0c0",
"RecoyDarkGrey": "#2a2a2a",
}
recoydarkblue = recoy_colordict["RecoyDarkBlue"]
recoyyellow = recoy_colordict["RecoyYellow"]
recoygreen = recoy_colordict["RecoyGreen"]
recoyred = recoy_colordict["RecoyRed"]
recoyblue = recoy_colordict["RecoyBlue"]
recoyorange = recoy_colordict["RecoyOrange"]
recoypurple = recoy_colordict["RecoyPurple"]
recoybluegrey = recoy_colordict["RecoyBlueGrey"]
recoylightgrey = recoy_greysdict["RecoyLightGrey"]
recoygrey = recoy_greysdict["RecoyGrey"]
recoydarkgrey = recoy_greysdict["RecoyDarkGrey"]
recoylilac = recoy_colordict["RecoyLilac"]
recoybrown = recoy_colordict["RecoyBrown"]
recoylightgreen = recoy_colordict["RecoyLightGreen"]
recoycitron = recoy_colordict["RecoyCitron"]
recoypink = recoy_colordict["RecoyPink"]
recoycolors = list(recoy_colordict.values())
transparent = "rgba(0, 0, 0, 0)"

@ -0,0 +1,66 @@
import pandas as pd
def MWh_to_m3(MWh):
return MWh / 9.769 * 1000
def MWh_to_GJ(MWh):
return MWh * 3.6
def EURperm3_to_EURperMWh(EURperm3):
return EURperm3 / 9.769 * 1000
def EURperMWh_to_EURperGJ(EURperMWh):
return EURperMWh * 3.6
def MWh_gas_to_tonnes_CO2(MWh):
    return MWh * 1.884 / 9.769  # 1.884 kg CO2/Nm3, 9.769 kWh/Nm3 (consistent with financial.calc_co2_costs)
def EURpertonCO2_to_EURperMWh(EURpertonCO2):
return EURpertonCO2 * 1.884 / 9.769
def EURperLHV_to_EURperHHV(MWh_LHV):
return MWh_LHV / 35.17 * 31.65
def EURperHHV_to_EURperLHV(MWh_HHV):
return MWh_HHV / 31.65 * 35.17
def GJ_gas_to_kg_NOX(GJ):
return GJ * 0.02
def MWh_gas_to_kg_NOX(MWh):
return GJ_gas_to_kg_NOX(MWh_to_GJ(MWh))
def fastround(n, decimals):
"""Round a value to certain number of decimals, faster than Python implementation"""
multiplier = 10**decimals
return int(n * multiplier + 0.5) / multiplier
def add_season_column(data):
"""Adds a column containing seasons to a DataFrame with datetime index"""
data["season"] = (data.index.month % 12 + 3) // 3
seasons = {1: "Winter", 2: "Spring", 3: "Summer", 4: "Fall"}
data["season"] = data["season"].map(seasons)
return data
def dt_column_to_local_time(column):
return column.dt.tz_localize("UTC").dt.tz_convert("Europe/Amsterdam")
def timestamp_to_utc(timestamp):
if isinstance(timestamp, str):
timestamp = pd.to_datetime(timestamp).tz_localize("Europe/Amsterdam")
return timestamp.tz_convert("UTC")

@ -0,0 +1,6 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,0092;20,54;17,04
MS-D;441;0,0092;12,24;17,04
MS-T;441;0,0055;10,84;13,56
HS/MS;2760;0;16,95;20,52
TS;2760;0;12,01;16,56

@ -0,0 +1,6 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,0101;22,74;18,84
MS-D;441,00;0,0101;13,57;18,84
MS-T;441,00;0,0062;12,26;15,36
HS/MS;2760,00;0,00;18,71;22,68
TS;2760,00;0,00;13,68;18,84

@ -0,0 +1,6 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,0111;24,23;20,4
MS-D;441,00;0,0111;14,30;20,4
MS-T;441,00;0,0069;13,03;16,8
HS/MS;2760,00;0,00;20,12;24,72
TS;2760,00;0,00;14,43;21

@ -0,0 +1,6 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,0111;31,55;26,52
MS-D;441,00;0,0111;18,62;26,52
MS-T;441,00;0,0069;16,99;21,96
HS/MS;2760,00;0,00;26,19;32,16
TS;2760,00;0,00;18,80;27,36

@ -0,0 +1,6 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,0206;44,84;37,68
MS-D;441;0,0206;26,47;37,68
MS-T;441;0,0128;24,19;31,2
HS/MS;2760;0;37,2;45,84
TS;2760;0;26,89;38,64

@ -0,0 +1,7 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,0097;21,12;19,2
MS;441;0,0097;13,44;19,2
TS/MS;2760;0;21,84;27,24
HS/MS;2760;0;21,84;27,24
TS;2760;0;20,88;26,4
HS;2760;0;10,56;13,32

@ -0,0 +1,7 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,01;23,16;20,88
MS;441,00;0,01;14,76;20,88
TS/MS;2760,00;0,00;24,12;30,00
HS/MS;2760,00;0,00;24,12;30,00
TS;2760,00;0,00;23,04;29,16
HS;2760,00;0,00;11,64;14,76

@ -0,0 +1,7 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,0107;23,04;20,76
MS;441,00;0,0107;14,52;20,76
TS/MS;2760,00;0,00;23,28;32,40
HS/MS;2760,00;0,00;23,28;32,40
TS;2760,00;0,00;23,16;30,00
HS;2760,00;0,00;12,00;14,88

@ -0,0 +1,7 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441,00;0,0160;34,56;31,08
MS;441,00;0,0160;21,84;31,08
TS/MS;2760,00;0,00;35,04;48,72
HS/MS;2760,00;0,00;35,04;48,72
TS;2760,00;0,00;34,68;45,00
HS;2760,00;0,00;18,00;22,44

@ -0,0 +1,7 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;705,6;0,0256;55,296;49,728
MS;705,6;0,0256;34,944;49,728
TS/MS;4416;0;56,064;77,952
HS/MS;4416;0;56,064;77,952
TS;4416;0;55,488;72
HS;4416;0;28,8;35,904

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;13,29;16,20
HS;2760;0,00;23,58;27,48

@ -0,0 +1,5 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,01;23,00;18,80
MS;441;0,01;12,36;18,80
HS+TS/MS;2760;0,00;23,00;29,52
TS ;2760;0,00;21,80;30,48

@ -0,0 +1,5 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;453;0,01;25,04;19,16
MS;453;0,01;12,71;19,16
HS+TS/MS;2760;0,00;24,06;32,1
TS ;2760;0,00;23,25;31,73

@ -0,0 +1,5 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,01;38,06;29,12
MS;441;0,01;19,32;29,12
HS+TS/MS;2760;0,00;36,57;48,78
TS ;2760;0,00;35,33;48,22

@ -0,0 +1,5 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
MS/LS;441;0,0176;43,1;34,23
MS;441;0,0176;23;34,23
HS+TS/MS;2760;0;42,45;56,62
TS ;2760;0;33,46;46,52

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;13,29;16,20
HS;2760;0,00;23,58;27,48

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;14,80;18,00
HS;2760;0,00;24,80;29,04

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;11,45;14,88
HS;2760,00;0,00;23,70;29,16

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;27,98;36,36
HS;2760,00;0,00;41,04;50,52

@ -0,0 +1,3 @@
Aansluiting;Vastrecht per jaar;kWh tarief;kW contract per jaar;kW max per jaar
EHS;12478,96;0,00;60,65;82,92
HS;2760,00;0,00;73,52;91,44

@ -0,0 +1,72 @@
{
"0 t/m 10.000 kWh": {
"2013": 0.1165,
"2014": 0.1185,
"2015": 0.1196,
"2016": 0.1007,
"2017": 0.1013,
"2018": 0.10458,
"2019": 0.09863,
"2020": 0.0977,
"2021": 0.09428,
"2022": 0.03679,
"2023": 0.12599,
"2024": 0.1088
},
"10.001 t/m 50.000 kWh": {
"2013": 0.0424,
"2014": 0.0431,
"2015": 0.0469,
"2016": 0.04996,
"2017": 0.04901,
"2018": 0.05274,
"2019": 0.05337,
"2020": 0.05083,
"2021": 0.05164,
"2022": 0.04361,
"2023": 0.10046,
"2024": 0.09037
},
"50.001 t/m 10 miljoen kWh": {
"2013": 0.0113,
"2014": 0.0115,
"2015": 0.0125,
"2016": 0.01331,
"2017": 0.01305,
"2018": 0.01404,
"2019": 0.01421,
"2020": 0.01353,
"2021": 0.01375,
"2022": 0.01189,
"2023": 0.03942,
"2024": 0.03943
},
"meer dan 10 miljoen kWh particulier": {
"2013": 0.001,
"2014": 0.001,
"2015": 0.001,
"2016": 0.00107,
"2017": 0.00107,
"2018": 0.00116,
"2019": 0.00117,
"2020": 0.00111,
"2021": 0.00113,
"2022": 0.00114,
"2023": 0.00175,
"2024": 0.00254
},
"meer dan 10 miljoen kWh zakelijk": {
"2013": 0.0005,
"2014": 0.0005,
"2015": 0.0005,
"2016": 0.00053,
"2017": 0.00053,
"2018": 0.00057,
"2019": 0.00058,
"2020": 0.00055,
"2021": 0.00056,
"2022": 0.00057,
"2023": 0.00115,
"2024": 0.00188
}
}

@ -0,0 +1,67 @@
{
"0 t/m 10.000 kWh": {
"2013": 0.0011,
"2014": 0.0023,
"2015": 0.0036,
"2016": 0.0056,
"2017": 0.0074,
"2018": 0.0132,
"2019": 0.0189,
"2020": 0.0273,
"2021": 0.03,
"2022": 0.0305,
"2023": 0.00
},
"10.001 t/m 50.000 kWh": {
"2013": 0.0014,
"2014": 0.0027,
"2015": 0.0046,
"2016": 0.007,
"2017": 0.0123,
"2018": 0.018,
"2019": 0.0278,
"2020": 0.0375,
"2021": 0.0411,
"2022": 0.0418,
"2023": 0.00
},
"50.001 t/m 10 mln kWh": {
"2013": 0.0004,
"2014": 0.0007,
"2015": 0.0012,
"2016": 0.0019,
"2017": 0.0033,
"2018": 0.0048,
"2019": 0.0074,
"2020": 0.0205,
"2021": 0.0225,
"2022": 0.0229,
"2023": 0.00
},
"meer dan 10 mln kWh niet zakelijk": {
"2013": 0.000017,
"2014": 0.000034,
"2015": 0.000055,
"2016": 0.000084,
"2017": 0.000131,
"2018": 0.000194,
"2019": 0.0003,
"2020": 0.0004,
"2021": 0.0004,
"2022": 0.0005,
"2023": 0.00
},
"meer dan 10 mln kWh zakelijk": {
"2013": 0.000017,
"2014": 0.000034,
"2015": 0.000055,
"2016": 0.000084,
"2017": 0.000131,
"2018": 0.000194,
"2019": 0.0003,
"2020": 0.0004,
"2021": 0.0004,
"2022": 0.0005,
"2023": 0.00
}
}

@ -0,0 +1,86 @@
{
"0 t/m 5.000 m3 en blokverwarming": {
"2013": 0.1862,
"2014": 0.1894,
"2015": 0.1911,
"2016": 0.25168,
"2017": 0.25244,
"2018": 0.26001,
"2019": 0.29313,
"2020": 0.33307,
"2021": 0.34856,
"2022": 0.36322,
"2023": 0.48980,
"2024": 0.58301
},
"5.001 t/m 170.000 m3": {
"2013": 0.1862,
"2014": 0.1894,
"2015": 0.1911,
"2016": 0.25168,
"2017": 0.25244,
"2018": 0.26001,
"2019": 0.29313,
"2020": 0.33307,
"2021": 0.34856,
"2022": 0.36322,
"2023": 0.48980,
"2024": 0.58301
},
"170.001 t/m 1 miljoen m3": {
"2013": 0.0439,
"2014": 0.0446,
"2015": 0.0677,
"2016": 0.06954,
"2017": 0.06215,
"2018": 0.06464,
"2019": 0.06542,
"2020": 0.06444,
"2021": 0.06547,
"2022": 0.06632,
"2023": 0.09621,
"2024": 0.22378
},
"meer dan 1 miljoen t/m 10 miljoen m3": {
"2013": 0.016,
"2014": 0.0163,
"2015": 0.0247,
"2016": 0.02537,
"2017": 0.02265,
"2018": 0.02355,
"2019": 0.02383,
"2020": 0.02348,
"2021": 0.02386,
"2022": 0.02417,
"2023": 0.05109,
"2024": 0.12825
},
"meer dan 10 miljoen m3 particulier": {
"2013": 0.0115,
"2014": 0.0117,
"2015": 0.0118,
"2016": 0.01212,
"2017": 0.01216,
"2018": 0.01265,
"2019": 0.0128,
"2020": 0.01261,
"2021": 0.01281,
"2022": 0.01298,
"2023": 0.03919,
"2024": 0.04886
},
"meer dan 10 miljoen m3 zakelijk": {
"2013": 0.0115,
"2014": 0.0117,
"2015": 0.0118,
"2016": 0.01212,
"2017": 0.01216,
"2018": 0.01265,
"2019": 0.0128,
"2020": 0.01261,
"2021": 0.01281,
"2022": 0.01298,
"2023": 0.03919,
"2024": 0.04886
}
}

@ -0,0 +1 @@
{"0 t\/m 5.000 m\u00b3":{"2013":0.02991,"2014":0.03042,"2015":0.03069,"2016":0.04042,"2017":0.04054,"2018":0.04175,"2019":0.04707,"2020":0.05348,"2021":0.05597, "2022":0.05833, "2023":0.07867, "2024":0.09365},"5.001 t\/m 170.000 m\u00b3":{"2013":0.02991,"2014":0.03042,"2015":0.03069,"2016":0.04042,"2017":0.04054,"2018":0.04175,"2019":0.04707,"2020":0.05348,"2021":0.05597, "2022":0.05833, "2023":0.07867, "2024":0.09365},"170.001 t\/m 1\u00a0miljoen m\u00b3":{"2013":0.0222,"2014":0.02258,"2015":0.02278,"2016":0.02339,"2017":0.02346,"2018":0.0244,"2019":0.02469,"2020":0.02432,"2021":0.02471, "2022":0.02503, "2023":0.03629, "2024":0.08444},"meer dan 1\u00a0miljoen t\/m 10\u00a0miljoen m\u00b3":{"2013":0.016,"2014":0.0163,"2015":0.0247,"2016":0.02537,"2017":0.02265,"2018":0.02355,"2019":0.02383,"2020":0.02348,"2021":0.02386, "2022":0.02417, "2023":0.05109, "2024":0.12855},"meer dan 10 miljoen m\u00b3":{"2013":0.0115,"2014":0.0117,"2015":0.0118,"2016":0.01212,"2017":0.01216,"2018":0.01265,"2019":0.0128,"2020":0.01261,"2021":0.01281, "2022":0.01298, "2023":0.03919, "2024":0.04886}}

@ -0,0 +1 @@
{"0 t\/m 170.000 m\u00b3":{"2013":0.0004,"2014":0.0007,"2015":0.0012,"2016":0.0018,"2017":0.0026,"2018":0.0046,"2019":0.0084,"2020":0.0124,"2021":0.0137, "2022":0.0139, "2023":0.00},"170.001 t\/m 1 mln m\u00b3":{"2013":0.0004,"2014":0.0009,"2015":0.0014,"2016":0.0021,"2017":0.0025,"2018":0.004,"2019":0.0061,"2020":0.0081,"2021":0.0089, "2022":0.0090, "2023":0.00},"meer dan 1 mln t\/m 10 mln m\u00b3":{"2013":0.0003,"2014":0.0005,"2015":0.0008,"2016":0.0013,"2017":0.0027,"2018":0.0039,"2019":0.0059,"2020":0.0212,"2021":0.0232, "2022":0.0236, "2023":0.00},"meer dan 10 mln m\u00b3":{"2013":0.0002,"2014":0.0004,"2015":0.0006,"2016":0.0009,"2017":0.0013,"2018":0.0021,"2019":0.0031,"2020":0.0212,"2021":0.0232, "2022":0.0236, "2023":0.00}}

@ -0,0 +1,54 @@
{
"0 t/m 170.000 m3 en blokverwarming": {
"2013": 0.0023,
"2014": 0.0046,
"2015": 0.0074,
"2016": 0.0113,
"2017": 0.0159,
"2018": 0.0285,
"2019": 0.0524,
"2020": 0.0775,
"2021": 0.0851,
"2022": 0.0865,
"2023": 0.00
},
"170.001 t/m 1 mln m3": {
"2013": 0.0009,
"2014": 0.0017,
"2015": 0.0028,
"2016": 0.0042,
"2017": 0.0074,
"2018": 0.0106,
"2019": 0.0161,
"2020": 0.0214,
"2021": 0.0235,
"2022": 0.0239,
"2023": 0.00
},
"meer dan 1 mln t/m 10 mln m3": {
"2013": 0.0003,
"2014": 0.0005,
"2015": 0.0008,
"2016": 0.0013,
"2017": 0.0027,
"2018": 0.0039,
"2019": 0.0059,
"2020": 0.0212,
"2021": 0.0232,
"2022": 0.0236,
"2023": 0.00
},
"meer dan 10 mln m3": {
"2013": 0.0002,
"2014": 0.0004,
"2015": 0.0006,
"2016": 0.0009,
"2017": 0.0013,
"2018": 0.0021,
"2019": 0.0031,
"2020": 0.0212,
"2021": 0.0232,
"2022": 0.0236,
"2023": 0.00
}
}

@ -0,0 +1,25 @@
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import pytz
from sqlalchemy import DateTime, TypeDecorator
class DateTimeTZ(TypeDecorator):
impl = DateTime
def process_result_value(self, value, dialect):
if value is None:
return None
return value.replace(tzinfo=pytz.utc)
DATABASE_URL = 'mssql+pyodbc://rop:OptimalTransition@rop-prices-test.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com:8472/test?driver=ODBC+Driver+17+for+SQL+Server'
# DATABASE_URL = 'mssql+pyodbc://rop:OptimalTransition@rop-prices-dev-20230123.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com:1433/test?driver=ODBC+Driver+17+for+SQL+Server'
# DATABASE_URL = Config().SQLALCHEMY_DATABASE_URI
BASE = declarative_base()
ENGINE_PRICES = create_engine(
DATABASE_URL,
pool_size=2000,
max_overflow=0,
connect_args={"options": "-c timezone=utc"},
)
SESSION = sessionmaker(bind=ENGINE_PRICES)
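# Minimal usage sketch (illustrative; assumes the database above is reachable and
# ODBC Driver 17 is installed): open a session, run a probe query, close it.
def _example_session():
    from sqlalchemy import text
    session = SESSION()
    print(session.execute(text("SELECT 1")).scalar())
    session.close()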

@ -0,0 +1,69 @@
from sqlalchemy import create_engine, MetaData, Table
import pandas as pd
DATABASES = {
"ngsc_dev": {
"db_url": "ngsc-dev-msql.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_dev",
"db_user": "ngsc_dev",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"ngsc_test": {
"db_url": "ngsc-test-msql.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_test",
"db_user": "ngsc_test",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"ngsc_prod": {
"db_url": "rop-ngsc-prod.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "ngsc_test",
"db_user": "ngsc_test",
"db_password": "AKIAZQ2BV5F5K6LLBC47",
"db_port": "1433",
},
"rop_prices_test": {
"db_url": "rop-prices-test.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "test",
"db_user": "rop",
"db_password": "OptimalTransition",
"db_port": "8472",
},
"rop_assets_test": {
"db_url": "rop-assets-test.cyqrsg0mmelh.eu-central-1.rds.amazonaws.com",
"db_name": "test",
"db_user": "rop",
"db_password": "OptimalTransition",
"db_port": "1433",
},
}
def db_engine(db_name):
db_config = DATABASES[db_name]
connection_string = (
f"mssql+pyodbc://{db_config['db_user']}:{db_config['db_password']}"
f"@{db_config['db_url']}:{db_config['db_port']}/"
f"{db_config['db_name']}?driver=ODBC+Driver+17+for+SQL+Server"
)
return create_engine(connection_string)
def read_entire_table(table_name, db_engine):
return pd.read_sql_table(table_name, db_engine)
def create_connection(engine, tables):
connection = engine.connect()
metadata = MetaData()
if isinstance(tables, str):
return connection, Table(tables, metadata, autoload=True, autoload_with=engine)
else:
db_tables = {
table: Table(table, metadata, autoload=True, autoload_with=engine)
for table in tables
}
return connection, db_tables
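# Illustrative sketch ('day_ahead_prices' is an assumed table name, not a known one):
# build an engine for one of the configured databases and read a full table into pandas.
def _example_read_table():
    engine = db_engine("rop_prices_test")
    return read_entire_table("day_ahead_prices", engine)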

@ -0,0 +1,17 @@
import time
from functools import wraps
def time_method(func):
"""Prints the runtime of a method of a class."""
@wraps(func)
def wrapper_timer(self, *args, **kwargs):
start = time.perf_counter()
value = func(self, *args, **kwargs)
end = time.perf_counter()
run_time = end - start
print(f"Finished running {self.name} in {run_time:.2f} seconds.")
return value
return wrapper_timer
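# Usage sketch: the decorator expects the instance to expose a 'name' attribute,
# which it prints together with the runtime. Hypothetical class for illustration.
class _TimedDemo:
    name = "demo run"
    @time_method
    def run(self):
        time.sleep(0.1)  # stands in for real work
# _TimedDemo().run() prints: Finished running demo run in 0.10 seconds.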

@ -0,0 +1,678 @@
from pathlib import Path
from datetime import timedelta
import numpy as np
import numpy_financial as npf
import pandas as pd
import warnings
def npv(discount_rate, cashflows):
cashflows = np.array(cashflows)
return (cashflows / (1 + discount_rate) ** np.arange(1, len(cashflows) + 1)).sum(
axis=0
)
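# Worked check (illustrative): three cashflows of 100 discounted at 10%:
# 100/1.1 + 100/1.1**2 + 100/1.1**3 ≈ 90.91 + 82.64 + 75.13 ≈ 248.69
def _example_npv():
    return round(npv(0.10, [100, 100, 100]), 2)  # -> 248.69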
def calc_electr_market_results(model, nom_col=None, real_col=None):
"""Function to calculate the financial result on Day-Ahead and Imbalance market for the input model.
Parameters:
-----------
model : df
DataFrame containing at least 'DAM', 'POS' and 'NEG' columns.
nom_col : str
Name of the column containing the Day-Ahead nominations in MWh
Negative values = Buy, positive values = Sell
    real_col : str
        Name of the column containing the realised volumes in MWh
Negative values = Buy, positive values = Sell
Returns:
--------
Original df with added columns showing the financial results per timeunit.
"""
if nom_col is None:
nom_col = "Nom. vol."
model[nom_col] = 0
producing = model[real_col] > 0
model["Prod. vol."] = model[real_col].where(producing, other=0)
model["Cons. vol."] = model[real_col].where(~producing, other=0)
model["Imb. vol."] = model[real_col] - model[nom_col]
model["Day-Ahead Result"] = model[nom_col] * model["DAM"]
model["POS Result"] = 0
model["NEG Result"] = 0
posimb = model["Imb. vol."] > 0
model["POS Result"] = model["POS"] * model["Imb. vol."].where(posimb, other=0)
model["NEG Result"] = model["NEG"] * model["Imb. vol."].where(~posimb, other=0)
model["Imbalance Result"] = model["POS Result"] + model["NEG Result"]
model["Combined Result"] = model["Day-Ahead Result"] + model["Imbalance Result"]
return model
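# Minimal input sketch (column names assumed per the docstring): two settlement
# periods with DAM/POS/NEG prices and realised volumes, nominations defaulting to 0.
def _example_electr_market_results():
    demo = pd.DataFrame(
        {
            "DAM": [50.0, 60.0],
            "POS": [40.0, 80.0],
            "NEG": [45.0, 90.0],
            "real": [1.0, -0.5],  # MWh: positive = sell, negative = buy
        }
    )
    demo = calc_electr_market_results(demo, real_col="real")
    return demo["Combined Result"].to_list()  # -> [40.0, -45.0], pure imbalance result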
def calc_co2_costs(co2_prices, volumes, fuel):
"""Calculates gas market results
Parameters:
-----------
co2_prices : numeric or array
CO2 prices in /ton
volumes : list
List of arrays containing volumes
fuel : list
List of arrays containing gas volumes
Returns:
--------
Returns a single negative value (=costs) in
"""
if not isinstance(volumes, list):
volumes = [volumes]
emission_factors = {
"gas": 1.884 / 9.769
} # in ton/MWh (based on 1.884 kg CO2/Nm3, 9.769 kWh/Nm3)
if fuel not in emission_factors.keys():
raise NotImplementedError(
f"Emission factor for chosen fuel '{fuel}' is not implemented."
f"Implement it by adding emission factor to the 'emission_factors' table."
)
emission_factor = emission_factors[fuel]
return -round(
abs(sum((array * emission_factor * co2_prices).sum() for array in volumes)), 2
)
def calculate_eb_ode(
cons,
electr=True,
year=2020,
tax_bracket=None,
base_cons=None,
horti=False,
m3=False,
):
"""Calculates energy tax and ODE for consumption of electricity or natural gas in given year.
    Function calculates the total tax to be paid for electricity or natural gas consumption,
consisting of energy tax ('Energiebelasting') and sustainable energy surcharge ('Opslag Duurzame Energie').
Tax bracket that applies is based on consumption level, with a different tax
rate for each bracket.
For Gas
1: 0 - 170.000 m3
    2: 170.000 - 1 mln. m3
    3: 1 mln. - 10 mln. m3
    4: > 10 mln. m3
For Electricity
1: 0 - 10 MWh
2: 10 - 50 MWh
3: 50 - 10.000 MWh
4: > 10.000 MWh
Parameters:
-----------
cons : numeric
Total consumption in given year for which to calculate taxes.
Electricity consumption in MWh
Gas consumption in MWh (or m3 and use m3=True)
electr : bool
Set to False for natural gas rates. Default is True.
year : int
Year for which tax rates should be used. Tax rates are updated
annually and can differ significantly.
tax_bracket : int
Tax bracket (1-4) to assume.
        Parameter can not be used in conjunction with 'base_cons'.
base_cons : numeric
Baseline consumption to assume, in same unit as 'cons'.
Specified value is used to decide what tax bracket to start in.
Taxes from baseline consumption are not included in calculation of the tax amount.
Parameter can not be used in conjunction with 'tax_bracket'.
horti : bool
The horticulture sector gets a discount on gas taxes.
m3 : bool
Set to True if you want to enter gas consumption in m3.
Default is to enter consumption in MWh.
Returns:
--------
Total tax amount as negative number (costs).
Note:
-----
This function is rather complicated, due to all its optionalities.
Should probably be simplified or split into different functions.
"""
if tax_bracket is not None and base_cons is not None:
raise ValueError(
"Parameters 'tax_bracket' and 'base_cons' can not be used at the same time."
)
if tax_bracket is None and base_cons is None:
raise ValueError(
"Function requires input for either 'tax_bracket' or 'base_cons'."
)
cons = abs(cons)
commodity = "electricity" if electr else "gas"
if commodity == "gas":
if not m3:
cons /= 9.769 / 1000 # Conversion factor for gas: 1 m3 = 9.769 kWh
base_cons = base_cons / (9.769 / 1000) if base_cons is not None else None
else:
cons *= 1000 # conversion MWh to kWh
base_cons = base_cons * 1000 if base_cons is not None else None
tax_brackets = {
"gas": [0, 170_000, 1_000_000, 10_000_000],
"electricity": [0, 10_000, 50_000, 10_000_000],
}
tax_brackets = tax_brackets[commodity]
base_cons = tax_brackets[tax_bracket - 1] if tax_bracket else base_cons
if commodity == "gas" and horti:
commodity += "_horticulture"
eb_rates, ode_rates = get_tax_tables(commodity)
eb = 0
ode = 0
for bracket in range(4):
if bracket < 3:
br_lower_limit = tax_brackets[bracket]
br_upper_limit = tax_brackets[bracket + 1]
if base_cons > br_upper_limit:
continue
bracket_size = br_upper_limit - max(br_lower_limit, base_cons)
cons_in_bracket = min(cons, bracket_size)
else:
cons_in_bracket = cons
eb += eb_rates.loc[year, eb_rates.columns[bracket]] * cons_in_bracket
ode += ode_rates.loc[year, ode_rates.columns[bracket]] * cons_in_bracket
cons -= cons_in_bracket
if cons == 0:
break
return -round(eb, 2), -round(ode, 2)
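# Usage sketch (illustrative figures): taxes on 20,000 MWh of electricity for a
# client whose baseline consumption already fills bracket 1. Both values come
# back negative (costs), in €.
def _example_eb_ode():
    eb, ode = calculate_eb_ode(cons=20_000, electr=True, tax_bracket=2, year=2020)
    return eb + ode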
def get_tax_tables(commodity):
"""Get EB and ODE tax rate tables from json files.
Returns two tax rate tables as DataFrame.
If table is not up-to-date, try use update_tax_tables() function.
"""
folder = Path(__file__).resolve().parent / "data" / "tax_tariffs"
eb_table = pd.read_json(folder / f"{commodity}_eb.json")
ode_table = pd.read_json(folder / f"{commodity}_ode.json")
if commodity == "electricity":
ode_table.drop(columns=ode_table.columns[3], inplace=True)
else:
eb_table.drop(columns=eb_table.columns[0], inplace=True)
if commodity != "gas_horticulture":
eb_table.drop(columns=eb_table.columns[3], inplace=True)
return eb_table, ode_table
def get_tax_rate(commodity, year, tax_bracket, perMWh=True):
"""Get tax rate for specific year and tax bracket.
Parameters:
-----------
commodity : str
{'gas' or 'electricity'}
year : int
{2013 - current year}
tax_bracket : int
{1 - 4}
For Gas:
1: 0 - 170.000 m3
        2: 170.000 - 1 mln. m3
        3: 1 mln. - 10 mln. m3
        4: > 10 mln. m3
For Electricity:
1: 0 - 10 MWh
2: 10 - 50 MWh
3: 50 - 10.000 MWh
4: > 10.000 MWh
perMWh : bool
        Defaults to True. Will return rates (for gas) in €/MWh instead of €/m3.
Returns:
--------
    Dictionary with EB, ODE and combined rates (in €/MWh for electricity and €/m3 for gas)
{'EB' : float
'ODE' : float,
'EB+ODE' : float}
"""
eb_table, ode_table = get_tax_tables(commodity)
eb_rate = eb_table.loc[year, :].iloc[tax_bracket - 1].astype(float).round(5) * 1000
ode_rate = (
ode_table.loc[year, :].iloc[tax_bracket - 1].astype(float).round(5) * 1000
)
if commodity == "gas" and perMWh == True:
eb_rate /= 9.769
ode_rate /= 9.769
comb_rate = (eb_rate + ode_rate).round(5)
return {"EB": eb_rate, "ODE": ode_rate, "EB+ODE": comb_rate}
def update_tax_tables():
"""Function to get EB and ODE tax rate tables from belastingdienst.nl and save as json file."""
url = (
"https://www.belastingdienst.nl/wps/wcm/connect/bldcontentnl/belastingdienst/"
"zakelijk/overige_belastingen/belastingen_op_milieugrondslag/tarieven_milieubelastingen/"
"tabellen_tarieven_milieubelastingen?projectid=6750bae7-383b-4c97-bc7a-802790bd1110"
)
tables = pd.read_html(url)
table_index = {
3: "gas_eb",
4: "gas_horticulture_eb",
6: "electricity_eb",
8: "gas_ode",
9: "gas_horticulture_ode",
10: "electricity_ode",
}
for key, val in table_index.items():
table = tables[key].astype(str)
table = table.applymap(lambda x: x.strip("*"))
table = table.applymap(lambda x: x.strip(""))
table = table.applymap(lambda x: x.replace(",", "."))
table = table.astype(float)
table["Jaar"] = table["Jaar"].astype(int)
table.set_index("Jaar", inplace=True)
path = Path(__file__).resolve().parent / "data" / "tax_tariffs" / f"{val}.json"
table.to_json(path)
def calc_grid_costs(
peakload_kW,
grid_operator,
year,
connection_type,
totalcons_kWh=0,
kw_contract_kW=None,
path=None,
modelled_time_period_years = 1
):
"""Calculate grid connection costs for one full year
Parameters:
-----------
peakload_kW : numeric or list
Peak load in kW. Can be single value (for entire year) or value per month (list).
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year to get tariffs for, e.g. 2020
connection_type : str
Type of grid connection, e.g. 'TS' or 'HS'.
Definitions are different for each grid operator.
totalcons_kWh : numeric
Total yearly consumption in kWh
kw_contract_kW : numeric
in kW. If provided, function will assume fixed value kW contract
path : str
Path to directory with grid tariff files.
        Default is None; function will look for the default folder on SharePoint.
Returns:
--------
    Total variable grid connection costs in €/year (fixed costs 'vastrecht' not included)
"""
totalcons_kWh /= modelled_time_period_years
tariffs = get_grid_tariffs_electricity(grid_operator, year, connection_type, path)
kw_max_kW = np.mean(peakload_kW)
max_peakload_kW = np.max(peakload_kW)
    if kw_contract_kW is not None and kw_contract_kW < max_peakload_kW:
        warnings.warn(
            "Maximum peak consumption is higher than provided 'kw_contract' value. "
            "Will continue to assume max peak consumption as kW contract."
        )
        kw_contract_kW = max_peakload_kW
    if not kw_contract_kW:
        kw_contract_kW = max_peakload_kW
if (tariffs["kWh tarief"] != 0) and (totalcons_kWh is None):
raise ValueError(
"For this grid connection type a tariff for kWh has to be paid. "
"Therefore 'totalcons_kWh' can not be None."
)
return {
"Variable": -round(tariffs["kWh tarief"] * abs(totalcons_kWh) * modelled_time_period_years, 2),
"kW contract": -round(tariffs["kW contract per jaar"] * kw_contract_kW * modelled_time_period_years, 2),
"kW max": -round(tariffs["kW max per jaar"] * max_peakload_kW * modelled_time_period_years, 2),
}
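# Usage sketch (figures and connection type are assumed, not prescribed): monthly
# peaks around 2 MW; the three cost components come back as negative values in €.
def _example_grid_costs():
    costs = calc_grid_costs(
        peakload_kW=[2_000] * 12,
        grid_operator="liander",
        year=2021,
        connection_type="MS",
        totalcons_kWh=5_000_000,
    )
    return sum(costs.values())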
def get_grid_tariffs_electricity(grid_operator, year, connection_type, path=None):
"""Get grid tranposrt tariffs
Parameters:
-----------
grid_operator : str
{'tennet', 'liander', 'enexis', 'stedin'}
year : int
Year to get tariffs for, e.g. 2020
connection_type : str
Type of grid connection, e.g. 'TS' or 'HS'.
Definitions are different for each grid operator.
path : str
Path to directory with grid tariff files.
        Default is None; function will look for the default folder on SharePoint.
Returns:
--------
    Dictionary containing grid tariffs in €/kW/year and €/kWh
"""
if path is None:
path = Path(__file__).resolve().parent / "data" / "grid_tariffs"
else:
path = Path(path)
if not path.exists():
raise SystemError(
f"Path '{path}' not found. Specify different path and try again."
)
filename = f"{grid_operator.lower()}_{year}.csv"
filepath = path / filename
if not filepath.exists():
raise NotImplementedError(
f"File '{filename}' does not exist. Files available: {[file.name for file in path.glob('*.csv')]}"
)
rates_table = pd.read_csv(
path / filename, sep=";", decimal=",", index_col="Aansluiting"
)
if connection_type not in rates_table.index:
raise ValueError(
f"The chosen connection type '{connection_type}' is not available "
f"for grid operator '{grid_operator}'. Please choose one of {list(rates_table.index)}."
)
return rates_table.loc[connection_type, :].to_dict()
def income_tax(ebit, fixed_tax_rate):
"""
Calculates income tax based on EBIT.
2021 tax rates
"""
if fixed_tax_rate:
return round(ebit * -0.25, 0)
    if ebit < 0:
        return 0
    if ebit > 245_000:
        # 2021 brackets: 15% up to €245,000, 25% above that threshold
        return round(245_000 * -0.15 + (ebit - 245_000) * -0.25, 2)
    return -round(ebit * 0.15, 2)
def calc_business_case(
capex,
discount_rate,
project_duration,
depreciation,
residual_value,
regular_earnings,
irregular_cashflows=0,
eia=False,
vamil=False,
fixed_income_tax=False
):
"""Calculate NPV and IRR for business case.
    All input parameters are either absolute or relative to a baseline.
Parameters:
-----------
capex : numeric
Total CAPEX or extra CAPEX compared to baseline
discount_rate : numeric
% as decimal value
project_duration : numeric
in years
    depreciation : numeric or list
Yearly depreciation costs
residual_value : numeric
        Residual value at end of project in €, total or compared to baseline.
regular_earnings : numeric
Regular earnings, usually EBITDA
irregular_cashflows : list
Pass list with value for each year.
eia : bool
Apply EIA ("Energie Investerings Aftrek") tax discounts.
Defaults to False.
    vamil : bool
        Apply VAMIL ("Willekeurige afschrijving milieu-investeringen") tax discounts.
        Defaults to False.
    fixed_income_tax : bool
        Apply a flat 25% income tax rate instead of the bracketed 2021 rates.
        Defaults to False.
Returns:
--------
DataFrame showing complete calculation resulting in NPV and IRR
"""
years = [f"Year {y}" for y in range(project_duration + 1)]
years_o = years[1:]
bc_calc = pd.DataFrame(columns=years)
bc_calc.loc["CAPEX (€)", "Year 0"] = -capex
bc_calc.loc["Regular Earnings (€)", years_o] = regular_earnings
bc_calc.loc["Irregular Cashflows (€)", years_o] = irregular_cashflows
bc_calc.loc["EBITDA (€)", years_o] = (
bc_calc.loc["Regular Earnings (€)", years_o]
+ bc_calc.loc["Irregular Cashflows (€)", years_o]
)
depreciations = [depreciation] * project_duration
if vamil:
ebitdas = bc_calc.loc["EBITDA (€)", years_o].to_list()
depreciations = _apply_vamil(depreciations, project_duration, ebitdas)
bc_calc.loc["Depreciations (€) -/-", years_o] = np.array(depreciations) * -1
bc_calc.loc["EBIT (€)", years_o] = (
bc_calc.loc["EBITDA (€)", years_o]
+ bc_calc.loc["Depreciations (€) -/-", years_o]
)
if eia:
bc_calc = _apply_eia(bc_calc, project_duration, capex, years_o)
bc_calc.loc["Income tax (Vpb.) (€)", years_o] = bc_calc.loc["EBIT (€)", :].apply(
income_tax, args=[fixed_income_tax]
)
if eia:
bc_calc.loc["NOPLAT (€)", years_o] = (
bc_calc.loc["EBIT before EIA (€)", :]
+ bc_calc.loc["Income tax (Vpb.) (€)", years_o]
)
else:
bc_calc.loc["NOPLAT (€)", years_o] = (
bc_calc.loc["EBIT (€)", :] + bc_calc.loc["Income tax (Vpb.) (€)", years_o]
)
bc_calc.loc["Depreciations (€) +/+", years_o] = depreciations
bc_calc.loc["Free Cash Flow (€)", years] = (
bc_calc.loc["CAPEX (€)", years].fillna(0)
+ bc_calc.loc["NOPLAT (€)", years].fillna(0)
+ bc_calc.loc["Depreciations (€) +/+", years].fillna(0)
)
spp = calc_simple_payback_time(
capex=capex,
free_cashflows=bc_calc.loc["Free Cash Flow (€)", years_o].values,
)
bc_calc.loc["Simple Payback Period", "Year 0"] = spp
try:
bc_calc.loc["IRR (%)", "Year 0"] = (
npf.irr(bc_calc.loc["Free Cash Flow (€)", years].values) * 100
)
    except Exception:
bc_calc.loc["IRR (%)", "Year 0"] = np.nan
bc_calc.loc["WACC (%)", "Year 0"] = discount_rate * 100
bc_calc.loc["NPV of explicit period (€)", "Year 0"] = npv(
discount_rate, bc_calc.loc["Free Cash Flow (€)"].values
)
bc_calc.loc["Discounted residual value (€)", "Year 0"] = (
residual_value / (1 + discount_rate) ** project_duration
)
bc_calc.loc["NPV (€)", "Year 0"] = (
bc_calc.loc["NPV of explicit period (€)", "Year 0"]
# + bc_calc.loc["Discounted residual value (€)", "Year 0"]
)
return bc_calc.round(2)
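# Minimal sketch (illustrative numbers): a 1 M€ investment depreciated linearly
# over 10 years with 200 k€ yearly regular earnings, discounted at 6%.
def _example_business_case():
    bc = calc_business_case(
        capex=1_000_000,
        discount_rate=0.06,
        project_duration=10,
        depreciation=100_000,
        residual_value=0,
        regular_earnings=200_000,
    )
    return bc.loc["NPV (€)", "Year 0"], bc.loc["IRR (%)", "Year 0"]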
def calc_simple_payback_time(capex, free_cashflows):
if free_cashflows.sum() < capex:
return np.nan
year = 0
spp = 0
while capex > 0:
cashflow = free_cashflows[year]
spp += min(capex, cashflow) / cashflow
capex -= cashflow
year += 1
    return spp
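# Worked check (illustrative): 1000 of CAPEX recovered by cashflows of 400/year
# pays back in 2.5 years (1 + 1 + 200/400).
def _example_spp():
    return calc_simple_payback_time(1000, np.array([400, 400, 400]))  # -> 2.5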
def _apply_vamil(depreciations, project_duration, ebitdas):
remaining_depr = sum(depreciations)
remaining_vamil = 0.75 * remaining_depr
for i in range(project_duration):
vamil_depr = min(ebitdas[i], remaining_vamil) if remaining_vamil > 0 else 0
if remaining_depr > 0:
lin_depr = remaining_depr / (project_duration - i)
depr = max(vamil_depr, lin_depr)
depreciations[i] = max(vamil_depr, lin_depr)
remaining_vamil -= vamil_depr
remaining_depr -= depr
else:
depreciations[i] = 0
return depreciations
def _apply_eia(bc_calc, project_duration, capex, years_o):
remaining_eia = 0.45 * capex
eia_per_year = [0] * project_duration
bc_calc = bc_calc.rename(index={"EBIT (€)": "EBIT before EIA (€)"})
ebits = bc_calc.loc["EBIT before EIA (€)", years_o].to_list()
eia_duration = min(10, project_duration)
for i in range(eia_duration):
if remaining_eia > 0:
eia_curr_year = max(min(remaining_eia, ebits[i]), 0)
eia_per_year[i] = eia_curr_year
remaining_eia -= eia_curr_year
else:
break
bc_calc.loc["EIA (€)", years_o] = np.array(eia_per_year) * -1
bc_calc.loc["EBIT (€)", :] = (
bc_calc.loc["EBIT before EIA (€)", :] + bc_calc.loc["EIA (€)", :]
)
return bc_calc
def calc_irf_value(
data, irf_volume, nomination_col=None, realisation_col=None, reco_col="reco"
):
"""Calculate IRF value
Takes a DataFrame [data] and returns the same DataFrame with a new column "IRF Value"
Parameters
----------
data : DataFrame
DataFrame that contains data. Should include price data (DAM, POS and NEG).
irf_volume : int
Volume on IRF in MW.
nomination_col : str
Name of the column containing nomination data in MWh.
realisation_col : str
Name of the column containing realisation data in MWh.
reco_col : str
Name of the column contaning recommendations.
"""
if not nomination_col:
nomination_col = "zero_nom"
data[nomination_col] = 0
if not realisation_col:
realisation_col = "zero_nom"
data[realisation_col] = 0
conversion_factor = pd.to_timedelta(data.index.freq) / timedelta(hours=1)
imb_pre_irf = data[realisation_col] - data[nomination_col]
result_pre_irf = (
data[nomination_col] * data["DAM"]
+ imb_pre_irf.where(imb_pre_irf > 0, other=0) * data["POS"]
+ imb_pre_irf.where(imb_pre_irf < 0, other=0) * data["NEG"]
)
data["IRF Nom"] = (
data[nomination_col] - data[reco_col] * irf_volume * conversion_factor
)
data["IRF Imb"] = data[realisation_col] - data["IRF Nom"]
result_post_irf = (
data["IRF Nom"] * data["DAM"]
+ data["IRF Imb"].where(data["IRF Imb"] > 0, other=0) * data["POS"]
+ data["IRF Imb"].where(data["IRF Imb"] < 0, other=0) * data["NEG"]
)
data["IRF Value"] = result_post_irf - result_pre_irf
return data

@ -0,0 +1,293 @@
import json
import os
import time
import pytz
from datetime import datetime, timedelta
from pathlib import Path
import numpy as np
import pandas as pd
import requests
from pyrecoy.prices import *
class Forecast:
"""Load dataset from SharePoint server as DataFrame in local datetime format.
Parameters:
----------
filename : str
Name of the csv file, e.g. "marketprices_nl.csv"
start : datetime
Startdate of the dataset
end : datetime
Enddate of the dataset
freq : str
{'1T', '15T', 'H'}
Time frequency of the data
folder_path : str
Local path to forecast data on Recoy SharePoint,
e.g. "C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/"
"""
def __init__(self, filename, start=None, end=None, freq="15T", folder_path=None, from_database=False, add_days_to_start_end=False):
self.file = filename
self.from_database = from_database
        if isinstance(start, str):
            start = datetime.strptime(start, "%Y-%m-%d").astimezone(pytz.timezone('Europe/Amsterdam'))
        if isinstance(end, str):
            end = datetime.strptime(end, "%Y-%m-%d").astimezone(pytz.timezone('Europe/Amsterdam'))
        self.data = self.get_dataset(start, end, freq, folder_path=folder_path, add_days_to_start_end=add_days_to_start_end)
if len(self.data) == 0:
raise Exception("No data available for those dates.")
def get_dataset(self, start, end, freq, folder_path=None, add_days_to_start_end=False):
if folder_path is None and self.from_database:
if add_days_to_start_end:
start = start + timedelta(days=-1)
end = end + timedelta(days=1)
start = start.astimezone(pytz.utc)
end = end.astimezone(pytz.utc)
dam = get_day_ahead_prices_from_database(start, end, 'NLD')
dam = dam.resample('15T').ffill()
imb = get_imbalance_prices_from_database(start, end, 'NLD')
data = pd.concat([imb, dam], axis='columns')
data = data[['DAM', 'POS', 'NEG']]
data = data.tz_convert('Europe/Amsterdam')
# data = data.loc[(data.index >= start) & (data.index < end)]
return data
def reindex_to_freq(self, freq):
"""Reindex dataset to a different timefrequency.
Parameters:
-----------
freq : string
options: '1T'
"""
ix_start = pd.to_datetime(self.data.index[0], utc=True).tz_convert(
"Europe/Amsterdam"
)
ix_end = pd.to_datetime(self.data.index[-1], utc=True).tz_convert(
"Europe/Amsterdam"
)
idx = pd.date_range(
ix_start, ix_end + timedelta(minutes=14), freq=freq, tz="Europe/Amsterdam"
)
self.data = self.data.reindex(index=idx, method="ffill")
class Mipf(Forecast):
"""
Load MIPF dataset from SharePoint server as DataFrame in local datetime format.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
forecasts = get_imbalance_forecasts_from_database_on_quarter_start_time(kwargs['start'], kwargs['end'],'NLD')
forecasts['PublicationTime'] = pd.to_datetime(forecasts['PublicationTime'], utc=True)
forecasts['PublicationTime'] = forecasts['PublicationTime'].dt.ceil('min')
forecasts['PublicationTime'] = forecasts['PublicationTime'].dt.tz_convert('Europe/Amsterdam')
forecasts['QuarterStartTime'] = pd.to_datetime(forecasts['QuarterStartTime'], utc=True)
forecasts['QuarterStartTime'] = forecasts['QuarterStartTime'].dt.tz_convert('Europe/Amsterdam')
forecasts = forecasts.loc[
(forecasts['PublicationTime'] >= forecasts['QuarterStartTime'])
& (forecasts['PublicationTime'] < forecasts['QuarterStartTime'] + pd.Timedelta(minutes=15))
]
forecasts = forecasts.groupby('PublicationTime').last()
forecasts.index.name = ''
forecasts = forecasts[['ForeNeg','ForePos']]
prices = self.data
idx = pd.date_range(
prices.index[0], prices.index[-1] + timedelta(minutes=14), freq='1T', tz="Europe/Amsterdam"
)
prices = prices.reindex(idx, method='ffill')
res = pd.concat([prices, forecasts], axis='columns')
self.data = res
def tidy_mipf(data, include_price_data=True, include_nextQ=False):
"""Takes MIPF dataset (unstacked) and turns it into a tidy dataset (stacked).
Parameters:
----------
include_price_data : bool
Set as True if columns 'DAM', 'POS' and 'NEG' data should be included in the output.
include_nextQ : bool
Set to True to include next Qh forecast
"""
mipf_pos = data[[f"POS_horizon{h}" for h in np.flip(np.arange(3, 18))]].copy()
mipf_neg = data[[f"NEG_horizon{h}" for h in np.flip(np.arange(3, 18))]].copy()
cols = ["ForePos", "ForeNeg"]
dfs = [mipf_pos, mipf_neg]
if include_nextQ:
pos_nextQ = data[[f"POS_horizon{h}" for h in np.flip(np.arange(18, 30))]].copy()
neg_nextQ = data[[f"NEG_horizon{h}" for h in np.flip(np.arange(18, 30))]].copy()
for h in np.arange(30, 33):
pos_nextQ.insert(0, f"POS_horizon{h}", np.NaN)
neg_nextQ.insert(0, f"POS_horizon{h}", np.NaN)
cols += ["ForePos_nextQ", "ForeNeg_nextQ"]
dfs += [pos_nextQ, neg_nextQ]
tidy_df = pd.DataFrame()
for df, col in zip(dfs, cols):
df.columns = range(15)
df.reset_index(drop=True, inplace=True)
df.reset_index(inplace=True)
df_melt = (
df.melt(id_vars=["index"], var_name="min", value_name=col)
.sort_values(["index", "min"])
.reset_index(drop=True)
)
tidy_df[col] = df_melt[col]
ix_start = data.index[0]
ix_end = data.index[-1] + timedelta(minutes=14)
tidy_df.index = pd.date_range(ix_start, ix_end, freq="1T", tz="Europe/Amsterdam")
tidy_df.index.name = "datetime"
if include_price_data:
for col in np.flip(["DAM", "POS", "NEG", "regulation state"]):
try:
price_col = data.loc[:, col].reindex(
index=tidy_df.index, method="ffill"
)
if col == "regulation state":
price_col.name = "RS"
tidy_df = pd.concat([price_col, tidy_df], axis="columns")
except Exception as e:
print(e)
return tidy_df
class Qipf(Forecast):
"""Load QIPF dataset from SharePoint server as DataFrame in local datetime format.
Parameters:
----------
start : datetime
Startdate of the dataset
end : datetime
Enddate of the dataset
folder_path : str
Local path to forecast data on Recoy SharePoint,
e.g. "C:/Users/username/Recoy/Recoy - Documents/03 - Libraries/12 - Data Management/Forecast Data/"
"""
def __init__(self, start=None, end=None, freq="15T", folder_path=None):
self.file = "imbalance_nl.csv"
self.data = self.get_dataset(start, end, "15T", folder_path=folder_path)
if freq != "15T":
self.reindex_to_freq(freq)
class Irf(Forecast):
"""Load QIPF dataset from SharePoint server as DataFrame in local datetime format."""
def __init__(
self, country, horizon, start=None, end=None, freq="60T", folder_path=None
):
if freq == "15T":
self.file = f"irf_{country}_{horizon}_15min.csv"
else:
self.file = f"irf_{country}_{horizon}.csv"
self.data = self.get_dataset(start, end, freq, folder_path=folder_path)
class NsideApiRequest:
"""
Request forecast data from N-SIDE API
If request fails, code will retry 5 times by default.
Output on success: data as DataFrame, containing forecast data. Index is timezone-aware datetime (Dutch time).
Output on error: []
"""
def __init__(
self,
endpoint,
country,
start=None,
end=None,
auth_token=None,
):
        if not auth_token:
            try:
                auth_token = os.environ["NSIDE_API_KEY"]
            except KeyError:
                raise ValueError("N-SIDE token not provided.")
self.data = self.get_data(auth_token, endpoint, country, start, end)
def get_data(self, token, endpoint, country, start, end):
if start is not None:
start = pd.to_datetime(start).strftime("%Y-%m-%d")
if end is not None:
end = pd.to_datetime(end).strftime("%Y-%m-%d")
url = f"https://energy-forecasting-api.eu.n-side.com/api/forecasts/{country}/{endpoint}"
if start and end:
url += f"?from={start}&to={end}"
print(url)
headers = {"Accept": "application/json", "Authorization": f"Token {token}"}
retry = 5
self.success = False
i = 0
while i <= retry:
resp = requests.get(url, headers=headers)
self.statuscode = resp.status_code
if self.statuscode == requests.codes.ok:
self.content = resp.content
json_data = json.loads(self.content)
data = pd.DataFrame(json_data["records"])
data = data.set_index("datetime")
data.index = pd.to_datetime(data.index, utc=True).tz_convert(
"Europe/Amsterdam"
)
self.success = True
return data.sort_index()
else:
print(
f"Attempt failled, status code {str(self.statuscode)}. Trying again..."
)
time.sleep(5)
i += 1
if not self.success:
print(
"Request failed. Please contact your Recoy contact person or try again later."
)
return []

@ -0,0 +1,61 @@
import warnings
from datetime import datetime, timedelta
import pandas as pd
import pytz
class TimeFramework:
"""
Representation of the modelled timeperiod.
Variables in this class are equal for all CaseStudies.
"""
def __init__(self, start, end):
if isinstance(start, str):
start = pytz.timezone("Europe/Amsterdam").localize(
datetime.strptime(start, "%Y-%m-%d")
)
if isinstance(end, str):
end = pytz.timezone("Europe/Amsterdam").localize(
datetime.strptime(end, "%Y-%m-%d")
)
end += timedelta(days=1)
end -= timedelta(minutes=1)
self.start = start
self.end = end
# Use the full Gregorian rule rather than a bare modulo-4 check
amount_of_days = 366 if calendar.isleap(start.year) else 365
self.days = (self.end - self.start + timedelta(days=1)) / timedelta(days=1)
self.modelled_time_period_years = (end - start).total_seconds() / (
3600 * 24 * amount_of_days
)
if self.days not in (365, 366):
warnings.warn(
f"The chosen timeperiod spans {self.days} days, "
"which is not a full year. Beware that certain "
"functions that use yearly rates might return "
"incorrect values."
)
def dt_index(self, freq):
# Build the range from the framework's inclusive start and end so the
# index stays complete across DST changes.
return pd.date_range(
start=self.start,
end=self.end,
freq=freq,
tz="Europe/Amsterdam",
# inclusive="left",
name="datetime",
)
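# Example usage (a sketch): a full-year framework at quarter-hourly resolution.
# tf = TimeFramework("2022-01-01", "2022-12-31")
# ix = tf.dt_index("15T")  # tz-aware DatetimeIndex in Europe/Amsterdam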

@ -0,0 +1,226 @@
import numpy as np
import pandas as pd
import warnings
from tqdm.notebook import tqdm
from .prices import get_tennet_data, get_balansdelta_nl
from .forecasts import Forecast
# TODO: This whole thing needs serious refactoring /MK
def generate_intelligent_baseline(startdate, enddate):
bd = get_balansdelta_nl(start=startdate, end=enddate)
bd.drop(
columns=[
"datum",
"volgnr",
"tijd",
"IGCCBijdrage_op",
"IGCCBijdrage_af",
"opregelen_reserve",
"afregelen_reserve",
],
inplace=True,
)
net_regelvolume = bd["opregelen"] - bd["Afregelen"]
bd.insert(2, "net_regelvolume", net_regelvolume)
vol_delta = bd["net_regelvolume"].diff(periods=1)
bd.insert(3, "vol_delta", vol_delta)
pc = get_tennet_data(
exporttype="verrekenprijzen", start=startdate, end=enddate
).reindex(index=bd.index, method="ffill")[["prikkelcomponent"]]
if len(pc) == 0:
pc = pd.Series(0, index=bd.index)
prices = Forecast("marketprices_nl.csv", start=startdate, end=enddate)
prices.reindex_to_freq("1T")
prices = prices.data
inputdata = pd.concat([prices, bd, pc], axis=1)
Qhs = len(inputdata) / 15
if Qhs % 1 > 0:
raise Exception(
"A dataset with incomplete quarter-hours was passed in; please provide a complete dataset."
)
data = np.array([inputdata[col].to_numpy() for col in inputdata.columns])
lstoutput = []
for q in tqdm(range(int(Qhs))):
q_data = [col[q * 15 : (q + 1) * 15] for col in data]
q_output = apply_imbalance_logic_for_quarter_hour(q_data)
if lstoutput:
for (ix, col) in enumerate(lstoutput):
lstoutput[ix] += q_output[ix]
else:
lstoutput = q_output
ib = pd.DataFrame(
lstoutput,
index=[
"DAM",
"POS",
"NEG",
"regulation state",
"ib_inv",
"ib_afn",
"ib_rt",
"nv_op",
"nv_af",
"opgeregeld",
"afgeregeld",
],
).T
ib.index = inputdata.index
return ib
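# Example usage (a sketch; dates are illustrative):
# start = pd.Timestamp("2022-06-01", tz="Europe/Amsterdam")
# end = pd.Timestamp("2022-06-07", tz="Europe/Amsterdam")
# ib = generate_intelligent_baseline(start, end)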
def apply_imbalance_logic_for_quarter_hour(q_data):
[nv_op, nv_af, opgeregeld, afgeregeld] = [False] * 4
lst_inv = [np.NaN] * 15
lst_afn = [np.NaN] * 15
lst_rt = [np.NaN] * 15
lst_nv_op = [np.NaN] * 15
lst_nv_af = [np.NaN] * 15
lst_afr = [np.NaN] * 15
lst_opr = [np.NaN] * 15
mins = iter(range(15))
for m in mins:
[
DAM,
POS,
NEG,
rt,
vol_op,
vol_af,
net_vol,
delta_vol,
nood_op,
nood_af,
prijs_hoog,
prijs_mid,
prijs_laag,
prikkelc,
] = [col[0 : m + 1] for col in q_data]
delta_vol[0] = 0
if nood_op.sum() > 0:
nv_op = True
if nood_af.sum() > 0:
nv_af = True
if pd.notna(prijs_hoog).any():
opgeregeld = True
if pd.notna(prijs_laag).any():
afgeregeld = True
if opgeregeld and not afgeregeld:
regeltoestand = 1
elif afgeregeld and not opgeregeld:
regeltoestand = -1
elif not opgeregeld and not afgeregeld:
if nv_op:
regeltoestand = 1
elif nv_af:
regeltoestand = -1
else:
regeltoestand = 0
else:
# Both upward and downward regulation > look at the trend
# Continuously non-decreasing: RT 1
# Continuously decreasing: RT -1
# No continuous trend: RT 2
if all(i >= 0 for i in delta_vol):
regeltoestand = 1
elif all(i <= 0 for i in delta_vol):
regeltoestand = -1
else:
regeltoestand = 2
# Determine the expected imbalance prices
dam = DAM[0]
pc = prikkelc[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hoogste_prijs = np.nanmax(prijs_hoog)
mid_prijs = prijs_mid[-1]
laagste_prijs = np.nanmin(prijs_laag)
if regeltoestand == 0:
prijs_inv = mid_prijs
prijs_afn = mid_prijs
elif regeltoestand == -1:
if nv_af:
prijs_afn = np.nanmin((dam - 200, laagste_prijs))
else:
prijs_afn = laagste_prijs
prijs_inv = prijs_afn
elif regeltoestand == 1:
if nv_op:
prijs_inv = np.nanmax((dam + 200, hoogste_prijs))
else:
prijs_inv = hoogste_prijs
prijs_afn = prijs_inv
elif regeltoestand == 2:
if nv_op:
prijs_afn = np.nanmax((dam + 200, hoogste_prijs, mid_prijs))
else:
prijs_afn = np.nanmax((mid_prijs, hoogste_prijs))
if nv_af:
prijs_inv = np.nanmin((dam - 200, laagste_prijs, mid_prijs))
else:
prijs_inv = np.nanmin((mid_prijs, laagste_prijs))
prijs_inv -= pc
prijs_afn += pc
lst_inv[m] = prijs_inv
lst_afn[m] = prijs_afn
lst_rt[m] = regeltoestand
lst_nv_op[m] = nv_op
lst_nv_af[m] = nv_af
lst_opr[m] = opgeregeld
lst_afr[m] = afgeregeld
return [
list(DAM),
list(POS),
list(NEG),
list(rt),
lst_inv,
lst_afn,
lst_rt,
lst_nv_op,
lst_nv_af,
lst_opr,
lst_afr,
]

@ -0,0 +1,143 @@
import plotly.graph_objects as go
from millify import millify
from plotly import figure_factory as ff
from .colors import *
from .reports import SingleFigureComparison
def npv_bar_chart(
cases, color=recoydarkblue, title="NPV Comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "npv", "NPV (€)").report
case_names = series.index
npvs = series.values
return single_figure_barchart(npvs, case_names, title, color, n_format)
def irr_bar_chart(
cases, color=recoydarkblue, title="IRR Comparison in %", n_format="%{text:.1f}%"
):
series = SingleFigureComparison(cases, "irr", "IRR (€)").report
case_names = series.index
irrs = series.values * 100
return single_figure_barchart(irrs, case_names, title, color, n_format)
def ebitda_bar_chart(
cases, color=recoydarkblue, title="EBITDA comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "ebitda", "EBITDA (€)").report
case_names = series.index
ebitdas = series.values
return single_figure_barchart(ebitdas, case_names, title, color, n_format)
def capex_bar_chart(
cases, color=recoydarkblue, title="CAPEX comparison in k€", n_format="%{text:.3s}"
):
series = SingleFigureComparison(cases, "total_capex", "CAPEX (€)").report
case_names = series.index
capex = series.values * -1
return single_figure_barchart(capex, case_names, title, color, n_format)
def single_figure_barchart(y_values, x_labels, title, color, n_format):
fig = go.Figure()
fig.add_trace(
go.Bar(
x=x_labels,
y=y_values,
text=y_values,
marker_color=color,
cliponaxis=False,
)
)
fig.update_layout(title=title)
ymin = min(y_values.min(), 0) * 1.1
ymax = max(y_values.max(), 0) * 1.1
fig.update_yaxes(range=[ymin, ymax])
fig.update_traces(texttemplate=n_format, textposition="outside")
return fig
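# Example usage (a sketch; assumes CaseStudy instances exposing an `npv` attribute):
# fig = npv_bar_chart([case_a, case_b])
# fig.show()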
def heatmap(
data,
title=None,
labels=None,
colormap="reds",
mult_factor=1,
decimals=2,
min_value=None,
max_value=None,
width=600,
height=400,
hover_prefix=None,
reversescale=False,
):
data_lists = (data * mult_factor).round(decimals).values.tolist()
xs = data.columns.tolist()
ys = data.index.to_list()
annotations = (
(data * mult_factor)
.applymap(lambda x: millify(x, precision=decimals))
.values.tolist()
)
if hover_prefix:
hover_labels = [
[f"{hover_prefix} {ann}" for ann in sublist] for sublist in annotations
]
else:
hover_labels = annotations
# This is an ugly trick to fix a bug with
# the axis labels not showing correctly
xs_ = [f"{str(x)}_" for x in xs]
ys_ = [f"{str(y)}_" for y in ys]
fig = ff.create_annotated_heatmap(
data_lists,
x=xs_,
y=ys_,
annotation_text=annotations,
colorscale=colormap,
showscale=True,
text=hover_labels,
hoverinfo="text",
reversescale=reversescale,
)
# Part 2 of the bug fix
fig.update_xaxes(tickvals=xs_, ticktext=xs)
fig.update_yaxes(tickvals=ys_, ticktext=ys)
fig.layout.xaxis.type = "category"
fig.layout.yaxis.type = "category"
fig["layout"]["xaxis"].update(side="bottom")
if min_value:
fig["data"][0]["zmin"] = min_value * mult_factor
if max_value:
fig["data"][0]["zmax"] = max_value * mult_factor
if labels:
xlabel = labels[0]
ylabel = labels[1]
else:
xlabel = data.columns.name
ylabel = data.index.name
fig.update_xaxes(title=xlabel)
fig.update_yaxes(title=ylabel)
if title:
fig.update_layout(
title=title,
title_x=0.5,
title_y=0.85,
width=width,
height=height,
)
return fig
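# Example usage (a sketch; assumes pandas is imported as pd in the calling scope):
# df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["A", "B"], columns=["X", "Y"])
# heatmap(df, title="Demo", labels=("column", "row")).show()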

@ -0,0 +1,765 @@
import os
from datetime import timedelta
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
import time
import warnings
import json
import pytz
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from entsoe.entsoe import EntsoePandasClient
from sqlalchemy import MetaData, Table, insert, and_, or_
from sqlalchemy.orm import sessionmaker
from pyrecoy import *
def get_fcr_prices(start, end, freq="H") -> pd.DataFrame:
"""Get FCR settlement prices from Regelleistung website
Returns: DataFrame with FCR prices with index with given time frequency in local time.
"""
start = start + timedelta(-1)
end = end + timedelta(1)
data = get_FCR_prices_from_database(start, end, 'NLD')
data = data.resample('15T').ffill()
data = data[['PricePerMWPerISP']]
data.columns = ['FCR NL (EUR/ISP)']
data.index.name = 'datetime'
data = data.tz_convert('Europe/Amsterdam')
return data
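# NOTE: everything below this return is unreachable legacy code from the
# earlier Regelleistung download implementation, kept for reference only.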
path = Path(
f"./data/fcr_prices_{freq}_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(
startdate, enddate, freq=freq, tz="Europe/Amsterdam", name="datetime"
)
return df
dfs = []
retry = 5
for date in pd.date_range(start=start, end=end + timedelta(days=1)):
r = 0
# print(f'DEBUG: {date}')
while r < retry:
try:
url = (
f"https://www.regelleistung.net/apps/cpp-publisher/api/v1/download/tenders/"
f"resultsoverview?date={date.strftime('%Y-%m-%d')}&exportFormat=xlsx&market=CAPACITY&productTypes=FCR"
)
df = pd.read_excel(url, engine="openpyxl")[
[
"DATE_FROM",
"PRODUCTNAME",
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]",
]
]
# print(f'DEBUG: {date} read in')
dfs.append(df)
break
except Exception:
# print(r)
time.sleep(1)
r += 1
warnings.warn(
f'No data received for {date.strftime("%Y-%m-%d")}. Retrying...({r}/{retry})'
)
if r == retry:
raise RuntimeError(f'No data received for {date.strftime("%Y-%m-%d")}')
df = pd.concat(dfs, axis=0)
df["hour"] = df["PRODUCTNAME"].map(lambda x: int(x.split("_")[1]))
df["Timeblocks"] = (
df["PRODUCTNAME"].map(lambda x: int(x.split("_")[2])) - df["hour"]
)
df.index = df.apply(
lambda row: pd.to_datetime(row["DATE_FROM"]) + timedelta(hours=row["hour"]),
axis=1,
).dt.tz_localize("Europe/Amsterdam")
df.drop(columns=["DATE_FROM", "PRODUCTNAME", "hour"], inplace=True)
df.rename(
columns={
"NL_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price NL [EUR/MW/{freq}]",
"DE_SETTLEMENTCAPACITY_PRICE_[EUR/MW]": f"FCR Price DE [EUR/MW/{freq}]",
},
inplace=True,
)
try:
df[f"FCR Price NL [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
df[f"FCR Price DE [EUR/MW/{freq}]"] = df[
f"FCR Price NL [EUR/MW/{freq}]"
].astype(float)
except Exception as e:
warnings.warn(
f"Could not convert data to floats. Should check... Exception: {e}"
)
df = df[~df.index.duplicated(keep="first")]
new_ix = pd.date_range(
start=df.index[0], end=df.index[-1], freq=freq, tz="Europe/Amsterdam"
)
df = df.reindex(new_ix, method="ffill")
mult = {"H": 1, "4H": 4, "D": 24}
df[f"FCR Price NL [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df[f"FCR Price DE [EUR/MW/{freq}]"] /= df["Timeblocks"] / mult[freq]
df = df[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
df.to_csv(path, sep=";", decimal=",", index_label="datetime")
return df
def get_tennet_data(exporttype, start, end):
"""Download data from TenneT API
TenneT documentation:
https://www.tennet.org/bedrijfsvoering/exporteer_data_toelichting.aspx
Parameters:
-----------
exporttype : str
Exporttype as defined in TenneT documentation.
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with API output.
"""
datefrom = start.strftime("%d-%m-%Y")
dateto = end.strftime("%d-%m-%Y")
url = (
f"http://www.tennet.org/bedrijfsvoering/ExporteerData.aspx?exporttype={exporttype}"
f"&format=csv&datefrom={datefrom}&dateto={dateto}&submit=1"
)
return pd.read_csv(url, decimal=",")
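# Example usage (a sketch): quarter-hour settlement prices for a single day.
# df = get_tennet_data(
#     "verrekenprijzen",
#     start=pd.Timestamp("2022-06-01"),
#     end=pd.Timestamp("2022-06-01"),
# )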
def get_imb_prices_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
exporttype = "verrekenprijzen"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["invoeden", "Afnemen", "regeltoestand"]]
data.columns = ["POS", "NEG", "RS"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def get_balansdelta_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
filename = f"balansdelta_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime")
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(startdate, enddate, freq="1T", tz="Europe/Amsterdam")
return data
exporttype = "balansdelta2017"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="1T", tz="Europe/Amsterdam")
data.index = date_ix
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_from_entsoe(start, end, marketagreement_type, entsoe_api_key):
client = EntsoePandasClient(entsoe_api_key)
return client.query_contracted_reserve_prices(
country_code="NL",
start=start,
end=end + timedelta(days=1),
type_marketagreement_type=marketagreement_type,
)
def get_afrr_capacity_fees_nl(start, end, entsoe_api_key=None):
path = Path(
f"./data/afrr_capacity_fees_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(startdate, enddate, freq="D", tz="Europe/Amsterdam")
return df
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
except KeyError:
raise ValueError("Please enter ENTSOE API key")
date_to_daily_bids = pd.to_datetime("2020-08-31").tz_localize("Europe/Amsterdam")
if start < date_to_daily_bids:
_start = start - timedelta(days=7)
data = _get_afrr_prices_from_entsoe(
start=_start,
end=min(date_to_daily_bids, end),
marketagreement_type="A02",
entsoe_api_key=entsoe_api_key,
)[["Automatic frequency restoration reserve - Symmetric"]]
if end > date_to_daily_bids:
_end = date_to_daily_bids - timedelta(days=1)
else:
_end = end
dt_index = pd.date_range(start, _end, freq="D", tz="Europe/Amsterdam")
data = data.reindex(dt_index, method="ffill")
# ENTSOE:
# "Before week no. 1 of 2020 the values are published per period
# per MW (Currency/MW per procurement period); meaning that it
# is not divided by MTU/ISP in that period."
if start < pd.to_datetime("2019-12-23"):
data[: pd.to_datetime("2019-12-22")] /= 7 * 24 * 4
if end >= date_to_daily_bids:
_data = (
_get_afrr_prices_from_entsoe(
start=max(date_to_daily_bids, start),
end=end,
marketagreement_type="A01",
entsoe_api_key=entsoe_api_key,
)
.resample("D")
.first()
)
cols = [
"Automatic frequency restoration reserve - Down",
"Automatic frequency restoration reserve - Symmetric",
"Automatic frequency restoration reserve - Up",
]
for col in cols:
if col not in _data.columns:
_data[col] = np.NaN
_data = _data[cols]
try:
data = pd.concat([data, _data], axis=0)
except Exception:
data = _data
data = data[start:end]
new_col_names = {
"Automatic frequency restoration reserve - Down": "aFRR Down [€/MW/day]",
"Automatic frequency restoration reserve - Symmetric": "aFRR Symmetric [€/MW/day]",
"Automatic frequency restoration reserve - Up": "aFRR Up [€/MW/day]",
}
data.rename(columns=new_col_names, inplace=True)
hours_per_day = (
pd.Series(
data=0,
index=pd.date_range(
start,
end + timedelta(days=1),
freq="15T",
tz="Europe/Amsterdam",
inclusive="left",
),
)
.resample("D")
.count()
)
data = data.multiply(hours_per_day.values, axis=0).round(2)
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_nl_from_tennet(start, end):
"""Get aFRR prices from TenneT API
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with imbalance prices.
"""
filename = f"afrr_prices_nl_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(
float
)
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(
startdate, enddate, freq="15T", tz="Europe/Amsterdam"
)
return data
data = get_tennet_data("verrekenprijzen", start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, "verrekenprijzen")
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["opregelen", "Afregelen"]]
data.columns = ["price_up", "price_down"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def get_afrr_prices_nl(start, end):
bd = get_balansdelta_nl(start=start, end=end)[
["Hoogste_prijs_opregelen", "Laagste_prijs_afregelen"]
]
bd.columns = ["rt_price_UP", "rt_price_DOWN"]
afrr_prices = _get_afrr_prices_nl_from_tennet(start, end).reindex(
bd.index, method="ffill"
)
return pd.concat([afrr_prices, bd], axis=1)
def _get_index_first_and_last_entry(data, exporttype):
if exporttype == "balansdelta2017":
time_col_name = "tijd"
elif exporttype == "verrekenprijzen":
time_col_name = "periode_van"
return [
pd.to_datetime(
" ".join((data["datum"].iloc[ix], data[time_col_name].iloc[ix])),
format="%d-%m-%Y %H:%M",
)
for ix in [0, -1]
]
def _handle_missing_data_by_reindexing(data):
print("Warning: Entries missing from TenneT data.")
data.index = data[["datum", "periode_van"]].apply(lambda x: " ".join(x), axis=1)
data.index = pd.to_datetime(data.index, format="%d-%m-%Y %H:%M").tz_localize(
"Europe/Amsterdam", ambiguous=True
)
data = data[~data.index.duplicated(keep="first")]
date_ix = pd.date_range(
data.index[0], data.index[-1], freq="15T", tz="Europe/Amsterdam"
)
data = data.reindex(date_ix)
print("Workaround implemented: Dataset was reindexed automatically.")
return data
def get_imb_prices_be(startdate, enddate):
start = pd.to_datetime(startdate).tz_localize("Europe/Brussels").tz_convert("UTC")
end = (
pd.to_datetime(enddate).tz_localize("Europe/Brussels") + timedelta(days=1)
).tz_convert("UTC")
rows = int((end - start) / timedelta(minutes=15))
resp_df = pd.DataFrame()
while rows > 0:
print(f"Getting next chunk, {rows} remaining.")
chunk = min(3000, rows)
end = start + timedelta(minutes=chunk * 15)
resp_df = pd.concat([resp_df, elia_api_call(start, end)], axis=0)
start = end
rows -= chunk
resp_df.index = pd.date_range(
start=resp_df.index[0], end=resp_df.index[-1], tz="Europe/Brussels", freq="15T"
)
resp_df.index.name = "datetime"
resp_df = resp_df[
["positiveimbalanceprice", "negativeimbalanceprice", "qualitystatus"]
].rename(columns={"positiveimbalanceprice": "POS", "negativeimbalanceprice": "NEG"})
resp_df["Validated"] = False
resp_df.loc[resp_df["qualitystatus"] == "Validated", "Validated"] = True
resp_df.drop(columns=["qualitystatus"], inplace=True)
return resp_df
def elia_api_call(start, end):
dataset = "ods047"
sort_by = "datetime"
url = "https://opendata.elia.be/api/records/1.0/search/"
rows = int((end - start) / timedelta(minutes=15))
end = end - timedelta(minutes=15)
endpoint = (
f"?dataset={dataset}&q=datetime:[{start.strftime('%Y-%m-%dT%H:%M:%SZ')}"
f" TO {end.strftime('%Y-%m-%dT%H:%M:%SZ')}]&rows={rows}&sort={sort_by}"
)
for _ in range(5):
try:
resp = requests.get(url + endpoint)
if resp.ok:
break
else:
raise Exception()
except Exception:
print("retrying...")
time.sleep(1)
if not resp.ok:
raise Exception(f"Error when calling API. Status code: {resp.status_code}")
resp_json = json.loads(resp.content)
resp_json = [entry["fields"] for entry in resp_json["records"]]
df = pd.DataFrame(resp_json).set_index("datetime")
df.index = pd.to_datetime(df.index, utc=True).tz_convert("Europe/Brussels")
df = df.sort_index()
return df
def get_da_prices_from_entsoe(
start, end, country_code, tz, freq="H", entsoe_api_key=None
):
"""Get Day-Ahead prices from ENTSOE
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with day-ahead prices.
"""
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
except KeyError:
raise ValueError("Please enter ENTSOE API key")
client = EntsoePandasClient(entsoe_api_key)
data = client.query_day_ahead_prices(
country_code, start=start, end=end + timedelta(days=1)
)
data = data[~data.index.duplicated()]
data.index = pd.date_range(data.index[0], data.index[-1], freq="H", tz=tz)
if freq != "H":
data = _reindex_to_freq(data, freq, tz)
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def _reindex_to_freq(data, freq, tz):
new_ix = pd.date_range(
data.index[0],
data.index[-1] + timedelta(hours=1),
freq=freq,
tz=tz,
)
return data.reindex(index=new_ix, method="ffill")
def get_da_prices_nl(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "NL", "Europe/Amsterdam", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_da_prices_be(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "BE", "Europe/Brussels", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_ets_prices(start, end, freq="D"):
"""Get CO2 prices (ETS) from ICE
Values are in /ton CO2
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with ETS settlement prices with datetime index (local time)
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ets_prices_from_database(start_x, end_x, 'NLD')
data = data.resample('1T').ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
# here = pytz.timezone("Europe/Amsterdam")
# start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
# end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
# path = Path(
# f"./data/ets_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
# )
# if path.exists():
# return _load_from_csv(path, freq=freq)
# else:
# raise Exception("Data not available for chosen dates.")
def get_ttf_prices(start, end, freq="D"):
"""Get Day-Ahead natural gas prices (TTF Day-ahead) from ICE
Values are in /MWh
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with TTF day-ahead prices with datetime index (local time)
Start and End are converted into start of year and end of year
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ttf_prices_from_database(start_x, end_x, 'NLD')
data = data.resample('1T').ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
# # while start_year <= end_year:
# here = pytz.timezone("Europe/Amsterdam")
# start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
# end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
# path = Path(
# f"./data/ttf_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
# )
# print(path)
# if path.exists():
# return _load_from_csv(path, freq=freq)
# else:
# raise Exception("Data not available for chosen dates.")
def _load_from_csv(filepath, freq):
data = pd.read_csv(
filepath,
delimiter=";",
decimal=",",
parse_dates=False,
index_col="datetime",
)
ix_start = pd.to_datetime(data.index[0], utc=True).tz_convert("Europe/Amsterdam")
ix_end = pd.to_datetime(data.index[-1], utc=True).tz_convert("Europe/Amsterdam")
data.index = pd.date_range(ix_start, ix_end, freq=freq, tz="Europe/Amsterdam")
return data.squeeze()
##### RECOY DATABASE QUERIES #####
def get_day_ahead_prices_from_database(start_hour, end_hour, CountryIsoCode, tz='utc'):
table = 'DayAheadPrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['HourStartTime'] >= start_hour,
table.columns['HourStartTime'] < end_hour
))
data = pd.DataFrame(data)
data['HourStartTime'] = pd.to_datetime(data['HourStartTime'], utc=True)
data.index = data['HourStartTime']
data.index.name = 'datetime'
data = data[['Price', 'CountryIsoCode']]
data.columns = ['DAM', 'CountryIsoCode']
if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_imbalance_prices_from_database(start_quarter, end_quarter, CountryIsoCode, tz='utc'):
table = 'ImbalancePrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['QuarterStartTime'] >= start_quarter,
table.columns['QuarterStartTime'] < end_quarter
))
data = pd.DataFrame(data)
data['QuarterStartTime'] = pd.to_datetime(data['QuarterStartTime'], utc=True)
data.index = data['QuarterStartTime']
data.index.name = 'datetime'
data = data[['FeedToGridPrice', 'TakeFromGridPrice', 'CountryIsoCode']]
data.columns = ['POS', 'NEG', 'CountryIsoCode']
if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_FCR_prices_from_database(start_day, end_day, CountryIsoCode, tz='utc'):
table = 'ReservePrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start_day,
table.columns['Timestamp'] <= end_day,
table.columns['ReserveType'] == 'FCR'
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['PricePerMWPerISP', 'CountryIsoCode']]
if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_imbalance_forecasts_from_database_on_publication_time(start_publication_time, end_publication_time, ForecastSources, CountryIsoCodes):
table = 'ImbalancePriceForecasts'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'].in_(CountryIsoCodes),
table.columns['PublicationTime'] >= start_publication_time,
table.columns['PublicationTime'] < end_publication_time,
table.columns['ForecastSource'].in_(ForecastSources)
))
return pd.DataFrame(data)
def get_imbalance_forecasts_from_database_on_quarter_start_time(start_quarter, end_quarter, ForecastSources, CountryIsoCode):
table = 'ImbalancePriceForecasts'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['QuarterStartTime'] >= start_quarter,
table.columns['QuarterStartTime'] < end_quarter,
table.columns['PublicationTime'] < end_quarter,
table.columns['ForecastSource'].in_(ForecastSources)
))
return pd.DataFrame(data)
def get_ttf_prices_from_database(start, end, CountryIsoCode, tz='utc'):
if start.tzinfo != pytz.utc:
start = start.astimezone(pytz.utc)
if end.tzinfo != pytz.utc:
end = end.astimezone(pytz.utc)
table = 'GasPrices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start,
table.columns['Timestamp'] < end
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['TTFPrice']]
data.columns = ['Gas prices (€/MWh)']
if tz != 'utc':
data = data.tz_convert(tz)
return data
def get_ets_prices_from_database(start, end, CountryIsoCode, tz='utc'):
if start.tzinfo != pytz.utc:
start = start.astimezone(pytz.utc)
if end.tzinfo != pytz.utc:
end = end.astimezone(pytz.utc)
table = 'Co2Prices'
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
data = session.query(table).filter(and_(
table.columns['CountryIsoCode'] == CountryIsoCode,
table.columns['Timestamp'] >= start,
table.columns['Timestamp'] < end
))
data = pd.DataFrame(data)
data['Timestamp'] = pd.to_datetime(data['Timestamp'], utc=True)
data.index = data['Timestamp']
data.index.name = 'datetime'
data = data[['Price']]
data.columns = ['CO2 prices (€/ton)']
if tz != 'utc':
data = data.tz_convert(tz)
return data

@ -0,0 +1,752 @@
import os
from datetime import timedelta
from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
import time
import warnings
import json
import pytz
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from entsoe.entsoe import EntsoePandasClient
from sqlalchemy import MetaData, Table, insert, and_, or_
from sqlalchemy.orm import sessionmaker
from pyrecoy import *
def get_fcr_prices(start, end, freq="H") -> pd.DataFrame:
"""Get FCR settlement prices from Regelleistung website
Returns: DataFrame with FCR prices with index with given time frequency in local time.
"""
start = start + timedelta(-1)
end = end + timedelta(1)
data = get_FCR_prices_from_database(start, end, "NLD")
data = data.resample("15T").ffill()
data = data[["PricePerMWPerISP"]]
data.columns = ["FCR NL (EUR/ISP)"]
data.index.name = "datetime"
data = data.tz_convert("Europe/Amsterdam")
return data
def get_tennet_data(exporttype, start, end):
"""Download data from TenneT API
TenneT documentation:
https://www.tennet.org/bedrijfsvoering/exporteer_data_toelichting.aspx
Parameters:
-----------
exporttype : str
Exporttype as defined in TenneT documentation.
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with API output.
"""
datefrom = start.strftime("%d-%m-%Y")
dateto = end.strftime("%d-%m-%Y")
url = (
f"http://www.tennet.org/bedrijfsvoering/ExporteerData.aspx?exporttype={exporttype}"
f"&format=csv&datefrom={datefrom}&dateto={dateto}&submit=1"
)
return pd.read_csv(url, decimal=",")
def get_imb_prices_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
exporttype = "verrekenprijzen"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["invoeden", "Afnemen", "regeltoestand"]]
data.columns = ["POS", "NEG", "RS"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def get_balansdelta_nl(start: pd.Timestamp, end: pd.Timestamp) -> pd.DataFrame:
filename = f"balansdelta_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime")
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(startdate, enddate, freq="1T", tz="Europe/Amsterdam")
return data
exporttype = "balansdelta2017"
data = get_tennet_data(exporttype, start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, exporttype)
date_ix = pd.date_range(first_entry, last_entry, freq="1T", tz="Europe/Amsterdam")
data.index = date_ix
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_from_entsoe(start, end, marketagreement_type, entsoe_api_key):
client = EntsoePandasClient(entsoe_api_key)
return client.query_contracted_reserve_prices(
country_code="NL",
start=start,
end=end + timedelta(days=1),
type_marketagreement_type=marketagreement_type,
)
def get_afrr_capacity_fees_nl(start, end, entsoe_api_key=None):
path = Path(
f"./data/afrr_capacity_fees_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
)
if path.exists():
df = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(float)
startdate = pd.to_datetime(df.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(df.index[-1]).strftime("%Y-%m-%d %H:%M")
df.index = pd.date_range(startdate, enddate, freq="D", tz="Europe/Amsterdam")
return df
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
except KeyError:
raise ValueError("Please enter ENTSOE API key")
date_to_daily_bids = pd.to_datetime("2020-08-31").tz_localize("Europe/Amsterdam")
if start < date_to_daily_bids:
_start = start - timedelta(days=7)
data = _get_afrr_prices_from_entsoe(
start=_start,
end=min(date_to_daily_bids, end),
marketagreement_type="A02",
entsoe_api_key=entsoe_api_key,
)[["Automatic frequency restoration reserve - Symmetric"]]
if end > date_to_daily_bids:
_end = date_to_daily_bids - timedelta(days=1)
else:
_end = end
dt_index = pd.date_range(start, _end, freq="D", tz="Europe/Amsterdam")
data = data.reindex(dt_index, method="ffill")
# ENTSOE:
# "Before week no. 1 of 2020 the values are published per period
# per MW (Currency/MW per procurement period); meaning that it
# is not divided by MTU/ISP in that period."
if start < pd.to_datetime("2019-12-23"):
data[: pd.to_datetime("2019-12-22")] /= 7 * 24 * 4
if end >= date_to_daily_bids:
_data = (
_get_afrr_prices_from_entsoe(
start=max(date_to_daily_bids, start),
end=end,
marketagreement_type="A01",
entsoe_api_key=entsoe_api_key,
)
.resample("D")
.first()
)
cols = [
"Automatic frequency restoration reserve - Down",
"Automatic frequency restoration reserve - Symmetric",
"Automatic frequency restoration reserve - Up",
]
for col in cols:
if col not in _data.columns:
_data[col] = np.NaN
_data = _data[cols]
try:
data = pd.concat([data, _data], axis=0)
except Exception:
data = _data
data = data[start:end]
new_col_names = {
"Automatic frequency restoration reserve - Down": "aFRR Down [€/MW/day]",
"Automatic frequency restoration reserve - Symmetric": "aFRR Symmetric [€/MW/day]",
"Automatic frequency restoration reserve - Up": "aFRR Up [€/MW/day]",
}
data.rename(columns=new_col_names, inplace=True)
hours_per_day = (
pd.Series(
data=0,
index=pd.date_range(
start,
end + timedelta(days=1),
freq="15T",
tz="Europe/Amsterdam",
inclusive="left",
),
)
.resample("D")
.count()
)
data = data.multiply(hours_per_day.values, axis=0).round(2)
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def _get_afrr_prices_nl_from_tennet(start, end):
"""Get aFRR prices from TenneT API
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
Returns:
--------
DataFrame with imbalance prices.
"""
filename = f"afrr_prices_nl_{start.strftime('%Y%m%d')}_{end.strftime('%Y%m%d')}.csv"
path = Path("./data") / filename
if path.exists():
data = pd.read_csv(path, sep=";", decimal=",", index_col="datetime").astype(
float
)
startdate = pd.to_datetime(data.index[0]).strftime("%Y-%m-%d %H:%M")
enddate = pd.to_datetime(data.index[-1]).strftime("%Y-%m-%d %H:%M")
data.index = pd.date_range(
startdate, enddate, freq="15T", tz="Europe/Amsterdam"
)
return data
data = get_tennet_data("verrekenprijzen", start, end)
first_entry, last_entry = _get_index_first_and_last_entry(data, "verrekenprijzen")
date_ix = pd.date_range(first_entry, last_entry, freq="15T", tz="Europe/Amsterdam")
if len(data) == len(date_ix):
data.index = date_ix
else:
data = _handle_missing_data_by_reindexing(data)
data = data[["opregelen", "Afregelen"]]
data.columns = ["price_up", "price_down"]
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
if not path.exists():
data.to_csv(path, sep=";", decimal=",", index_label="datetime")
return data
def get_afrr_prices_nl(start, end):
bd = get_balansdelta_nl(start=start, end=end)[
["Hoogste_prijs_opregelen", "Laagste_prijs_afregelen"]
]
bd.columns = ["rt_price_UP", "rt_price_DOWN"]
afrr_prices = _get_afrr_prices_nl_from_tennet(start, end).reindex(
bd.index, method="ffill"
)
return pd.concat([afrr_prices, bd], axis=1)
def _get_index_first_and_last_entry(data, exporttype):
if exporttype == "balansdelta2017":
time_col_name = "tijd"
elif exporttype == "verrekenprijzen":
time_col_name = "periode_van"
return [
pd.to_datetime(
" ".join((data["datum"].iloc[ix], data[time_col_name].iloc[ix])),
format="%d-%m-%Y %H:%M",
)
for ix in [0, -1]
]
def _handle_missing_data_by_reindexing(data):
print("Warning: Entries missing from TenneT data.")
data.index = data[["datum", "periode_van"]].apply(lambda x: " ".join(x), axis=1)
data.index = pd.to_datetime(data.index, format="%d-%m-%Y %H:%M").tz_localize(
"Europe/Amsterdam", ambiguous=True
)
data = data[~data.index.duplicated(keep="first")]
date_ix = pd.date_range(
data.index[0], data.index[-1], freq="15T", tz="Europe/Amsterdam"
)
data = data.reindex(date_ix)
print("Workaround implemented: Dataset was reindexed automatically.")
return data
def get_imb_prices_be(startdate, enddate):
start = pd.to_datetime(startdate).tz_localize("Europe/Brussels").tz_convert("UTC")
end = (
pd.to_datetime(enddate).tz_localize("Europe/Brussels") + timedelta(days=1)
).tz_convert("UTC")
rows = int((end - start) / timedelta(minutes=15))
resp_df = pd.DataFrame()
while rows > 0:
print(f"Getting next chunk, {rows} remaining.")
chunk = min(3000, rows)
end = start + timedelta(minutes=chunk * 15)
resp_df = pd.concat([resp_df, elia_api_call(start, end)], axis=0)
start = end
rows -= chunk
resp_df.index = pd.date_range(
start=resp_df.index[0], end=resp_df.index[-1], tz="Europe/Brussels", freq="15T"
)
resp_df.index.name = "datetime"
resp_df = resp_df[
["positiveimbalanceprice", "negativeimbalanceprice", "qualitystatus"]
].rename(columns={"positiveimbalanceprice": "POS", "negativeimbalanceprice": "NEG"})
resp_df["Validated"] = False
resp_df.loc[resp_df["qualitystatus"] == "Validated", "Validated"] = True
resp_df.drop(columns=["qualitystatus"], inplace=True)
return resp_df
def elia_api_call(start, end):
dataset = "ods047"
sort_by = "datetime"
url = "https://opendata.elia.be/api/records/1.0/search/"
rows = int((end - start) / timedelta(minutes=15))
end = end - timedelta(minutes=15)
endpoint = (
f"?dataset={dataset}&q=datetime:[{start.strftime('%Y-%m-%dT%H:%M:%SZ')}"
f" TO {end.strftime('%Y-%m-%dT%H:%M:%SZ')}]&rows={rows}&sort={sort_by}"
)
for _ in range(5):
try:
resp = requests.get(url + endpoint)
if resp.ok:
break
else:
raise Exception()
except Exception:
print("retrying...")
time.sleep(1)
if not resp.ok:
raise Exception(f"Error when calling API. Status code: {resp.status_code}")
resp_json = json.loads(resp.content)
resp_json = [entry["fields"] for entry in resp_json["records"]]
df = pd.DataFrame(resp_json).set_index("datetime")
df.index = pd.to_datetime(df.index, utc=True).tz_convert("Europe/Brussels")
df = df.sort_index()
return df
def get_da_prices_from_entsoe(
start, end, country_code, tz, freq="H", entsoe_api_key=None
):
"""Get Day-Ahead prices from ENTSOE
Parameters:
-----------
start : pd.Timestamp
Start date
end : pd.Timestamp
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with day-ahead prices.
"""
if not entsoe_api_key:
try:
entsoe_api_key = os.environ["ENTSOE_API_KEY"]
except KeyError:
raise ValueError("Please enter ENTSOE API key")
client = EntsoePandasClient(entsoe_api_key)
data = client.query_day_ahead_prices(
country_code, start=start, end=end + timedelta(days=1)
)
data = data[~data.index.duplicated()]
data.index = pd.date_range(data.index[0], data.index[-1], freq="H", tz=tz)
if freq != "H":
data = _reindex_to_freq(data, freq, tz)
data = data[start.strftime("%Y-%m-%d") : end.strftime("%Y-%m-%d")]
return data
def _reindex_to_freq(data, freq, tz):
new_ix = pd.date_range(
data.index[0],
data.index[-1] + timedelta(hours=1),
freq=freq,
tz=tz,
)
return data.reindex(index=new_ix, method="ffill")
def get_da_prices_nl(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "NL", "Europe/Amsterdam", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_da_prices_be(start, end, freq="H", entsoe_api_key=None):
return get_da_prices_from_entsoe(
start, end, "BE", "Europe/Brussels", freq=freq, entsoe_api_key=entsoe_api_key
)
def get_ets_prices(start, end, freq="D"):
"""Get CO2 prices (ETS) from ICE
Values are in /ton CO2
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with ETS settlement prices with datetime index (local time)
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ets_prices_from_database(start_x, end_x, "NLD")
data = data.resample("1T").ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
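# NOTE: the lines below are unreachable after the return above; they are the
# legacy CSV-cache fallback, kept for reference only.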
here = pytz.timezone("Europe/Amsterdam")
start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
path = Path(
f"./data/ets_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
)
if path.exists():
return _load_from_csv(path, freq=freq)
else:
raise Exception("Data not available for chosen dates.")
def get_ets_prices_excel(start, end, freq="D"):
"""Get CO2 prices (ETS) from ICE
Values are in /ton CO2
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with ETS settlement prices with datetime index (local time)
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = pd.read_excel(
r"C:\Users\shahla.huseynova\Heliox Group B.V\Recoy - Documents\01 _ Acquisition\FOLBB\CO2 prijzen EEX 2021-2023.xlsx",
sheet_name="SINGLE COL",
parse_dates=True,
index_col=0,
)
data.index = pd.to_datetime(data.index, dayfirst=True)
data = data.groupby(data.index).last()
data.index = data.index.tz_localize('Europe/Amsterdam')
data = data.resample("1T").ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
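# NOTE: the lines below are unreachable after the return above; they are the
# legacy CSV-cache fallback, kept for reference only.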
here = pytz.timezone("Europe/Amsterdam")
start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
path = Path(
f"./data/ets_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
)
if path.exists():
return _load_from_csv(path, freq=freq)
else:
raise Exception("Data not available for chosen dates.")
def get_ttf_prices(start, end, freq="D"):
"""Get Day-Ahead natural gas prices (TTF Day-ahead) from ICE
Values are in /MWh
Parameters:
-----------
start : datetime
Start date
end : datetime
End date
freq : str
Frequency, e.g. '15T' or 'H'
Returns:
--------
Series with TTF day-ahead prices with datetime index (local time)
Start and End are converted into start of year and end of year
"""
start_x = start + timedelta(days=-2)
end_x = end + timedelta(days=2)
data = get_ttf_prices_from_database(start_x, end_x, "NLD")
data = data.resample("1T").ffill()
data = data.loc[(data.index >= start) & (data.index < end)]
return data
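# NOTE: the lines below are unreachable after the return above; they are the
# legacy CSV-cache fallback, kept for reference only.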
# while start_year <= end_year:
here = pytz.timezone("Europe/Amsterdam")
start_file = pd.Timestamp(str(start.year) + "-1-1", tz=here).to_pydatetime()
end_file = pd.Timestamp(str(start.year) + "-12-31", tz=here).to_pydatetime()
path = Path(
f"./data/ttf_prices_{freq}_{start_file.strftime('%Y%m%d')}_{end_file.strftime('%Y%m%d')}.csv"
)
if path.exists():
return _load_from_csv(path, freq=freq)
else:
raise Exception("Data not available for chosen dates.")
def _load_from_csv(filepath, freq):
data = pd.read_csv(
filepath,
delimiter=";",
decimal=",",
parse_dates=False,
index_col="datetime",
)
ix_start = pd.to_datetime(data.index[0], utc=True).tz_convert("Europe/Amsterdam")
ix_end = pd.to_datetime(data.index[-1], utc=True).tz_convert("Europe/Amsterdam")
data.index = pd.date_range(ix_start, ix_end, freq=freq, tz="Europe/Amsterdam")
return data.squeeze()
##### RECOY DATABASE QUERIES #####
def convert_columns_to_localized_datetime_from_utc(df, columns, tz):
for column in columns:
df[column] = pd.to_datetime(df[column], utc=True)
df[column] = df[column].dt.tz_convert(tz)
return df
def get_price_data_from_database(
database_name,
time_index_column,
database_columns,
rename_columns,
start,
end,
CountryIsoCode,
tz="utc",
to_datetime_columns=[],
):
"""_summary_
Args:
database_name (string): name of the database table to query
time_index_column (string): column which is converted to a datetime column and used as the index
database_columns (list of strings): columns of the database table you want to query
rename_columns (list of strings): new names for the columns which are queried
start (string or datetime): start time of the data you want to select based on the time_index_column
end (string or datetime): end time of the data you want to select based on the time_index_column
CountryIsoCode (string): CountryIsoCode of the data
tz (str, optional): timezone the datetime columns are converted to. Defaults to "utc".
to_datetime_columns (list, optional): additional columns converted to datetime columns. Defaults to [].
Returns:
pd.DataFrame: the queried columns renamed per rename_columns, with a timezone-aware datetime index built from time_index_column.
"""
table = database_name
md = MetaData(ENGINE_PRICES)
table = Table(table, md, autoload=True)
session = sessionmaker(bind=ENGINE_PRICES)()
end = end + timedelta(days=1)
data = session.query(table).filter(
and_(
table.columns["CountryIsoCode"] == CountryIsoCode,
table.columns[time_index_column] >= start,
table.columns[time_index_column] < end,
)
)
data = pd.DataFrame(data)
data[time_index_column] = pd.to_datetime(data[time_index_column], utc=True)
data.index = data[time_index_column]
data.index.name = "datetime"
data = data[database_columns + ["CountryIsoCode"]]
data.columns = rename_columns + ["CountryIsoCode"]
if tz.__eq__("utc") is False:
data = data.tz_convert(tz)
data = convert_columns_to_localized_datetime_from_utc(data, to_datetime_columns, tz)
return data
def get_day_ahead_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"DayAheadPrices",
"HourStartTime",
["Price"],
["DAM"],
start,
end,
CountryIsoCode,
tz=tz,
)
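# Example usage (a sketch): Dutch day-ahead prices in local time.
# dam = get_day_ahead_prices_from_database(
#     pd.Timestamp("2022-01-01", tz="UTC"),
#     pd.Timestamp("2022-02-01", tz="UTC"),
#     "NLD",
#     tz="Europe/Amsterdam",
# )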
def get_imbalance_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"ImbalancePrices",
"QuarterStartTime",
["FeedToGridPrice", "TakeFromGridPrice"],
["POS", "NEG"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_imbalance_forecasts_from_database_on_publication_time(
start, end, CountryIsoCode, tz="utc"
):
return get_price_data_from_database(
"ImbalancePriceForecasts",
"PublicationTime",
["PublicationTime", "QuarterStartTime", "FeedToGridPrice", "TakeFromGridPrice"],
["PublicationTime", "QuarterStartTime", "ForePos", "ForeNeg"],
start,
end,
CountryIsoCode,
tz=tz,
to_datetime_columns=["QuarterStartTime"],
)
def get_imbalance_forecasts_from_database_on_quarter_start_time(
start, end, CountryIsoCode, tz="utc"
):
return get_price_data_from_database(
"ImbalancePriceForecasts",
"QuarterStartTime",
["PublicationTime", "QuarterStartTime", "FeedToGridPrice", "TakeFromGridPrice"],
["PublicationTime", "QuarterStartTime", "ForePos", "ForeNeg"],
start,
end,
CountryIsoCode,
tz=tz,
to_datetime_columns=["QuarterStartTime"],
)
def get_ttf_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"GasPrices",
"DeliveryDate",
["Price"],
["Gas prices (€/MWh)"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_ets_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_price_data_from_database(
"Co2Prices",
"DeliveryDate",
["Price"],
["CO2 prices (€/MWh)"],
start,
end,
CountryIsoCode,
tz=tz,
)
def get_reserve_prices_from_database(
start, end, reserve_type, CountryIsoCode, tz="utc"
):
data = get_price_data_from_database(
"ReservePrices",
"Timestamp",
["PricePerMWPerISP", "ReserveType"],
["PricePerMWPerISP", "ReserveType"],
start,
end,
CountryIsoCode,
tz=tz,
)
data = data.loc[data["ReserveType"] == reserve_type]
return data
def get_FCR_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(start, end, "FCR", CountryIsoCode, tz=tz)
def get_aFRR_up_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "aFRR Up", CountryIsoCode, tz=tz
)
def get_aFRR_down_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "aFRR Down", CountryIsoCode, tz=tz
)
def get_mFRR_up_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "mFRR Up", CountryIsoCode, tz=tz
)
def get_mFRR_down_prices_from_database(start, end, CountryIsoCode, tz="utc"):
return get_reserve_prices_from_database(
start, end, "mFRR Down", CountryIsoCode, tz=tz
)

@ -0,0 +1,156 @@
import numpy as np
import pandas as pd
from .styling import businesscase_formatter, num_formatting, perc_formatting
class CaseReport:
"""Dataframe report showing KPIs for specific CaseStudy.
Parameters:
-----------
case : CaseStudy
kind : str
The report type: {'electr_market_results', 'cashflows', 'ebitda_calc'}.
"""
def __init__(self, case, kind):
self._check_if_attr_exists(case, kind)
case_data = getattr(case, kind)
self.report = self.create_report(case.name, case_data)
self.formatting = "number"
def _check_if_attr_exists(self, case, kind):
if not hasattr(case, kind):
raise AttributeError(
f"Attribute '{kind}' is not available for '{case.name}' case. "
"You should first generate it using "
"the appropriate CaseStudy method."
)
def create_report(self, case_name, case_data):
if isinstance(case_data, dict):
case_data = pd.Series(case_data)
return pd.DataFrame(case_data, columns=[case_name])
def show(self, presentation_format=True):
if not presentation_format:
return self.report
if self.formatting == "percentage":
return self.report.applymap(perc_formatting)
else:
return self.report.applymap(num_formatting)
class ComparisonReport(CaseReport):
"""Dataframe report showing a copmarison of KPIs between CaseStudy instances.
Parameters:
-----------
cases : list
List of CaseStudy instances
kind : str
Type of report
baseline : CaseStudy
CaseStudy instance to use as baseline
comparison : str
{'absolute', 'relative', 'percentage'}
Sets how the numbers in the comparison are in relation to the baseline.
"""
def __init__(self, cases, kind, baseline=None, comparison="absolute"):
case_reports = []
self.formatting = "number"
for case in cases:
case_report = CaseReport(case=case, kind=kind).report
case_reports.append(case_report)
self.report = pd.concat(case_reports, axis=1).fillna(0)
if comparison == "relative":
self._comp_relative(baseline)
elif comparison == "percentage":
self._comp_percentage(baseline)
# ugly fix to make sure EBITDA is at the bottom when df is printed
if kind == "ebitda_calc":
ix = self.report.index.to_list()
ix.remove("EBITDA (€)")
ix.remove("Depreciation (€)")
ix.remove("EBITDA + depr (€)")
ix.append("EBITDA (€)")
ix.append("Depreciation (€)")
ix.append("EBITDA + depr (€)")
self.report = self.report.reindex(ix)
def _comp_relative(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.subtract(baseline_report, axis=0)
if baseline.name in self.report.columns:
self.report.drop(columns=baseline.name, inplace=True)
if baseline.name in self.report.index:
self.report.drop(index=baseline.name, inplace=True)
self.formatting = "number"
def _comp_percentage(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.divide(baseline_report / 100, axis=0).replace(
[-np.inf, np.inf], 0
)
self.formatting = "percentage"
class BusinessCaseReport(CaseReport):
"""Show business case for CaseStudy"""
def __init__(self, case, presentation_format=False):
self._check_if_attr_exists(case, "business_case")
self.report = getattr(case, "business_case")
def show(self, presentation_format=True):
if presentation_format:
return businesscase_formatter(self.report)
else:
return self.report
class SingleFigureComparison(ComparisonReport):
def __init__(
self,
cases,
kpi,
label,
baseline=None,
comparison="absolute",
):
figure_dict = {}
for case in cases:
self._check_if_attr_exists(case, kpi)
figure_dict[case.name] = getattr(case, kpi)
self.report = pd.Series(figure_dict, name=label)
if comparison == "relative":
self._comp_relative(baseline)
elif comparison == "percentage":
self._comp_percentage(baseline)
def show(self, nformat=None):
if nformat is not None:
return self.report.apply(nformat.format)
else:
return self.report
def _comp_relative(self, baseline):
baseline_report = self.report[baseline.name]
self.report = self.report.subtract(baseline_report, axis=0)
self.report.drop(index=baseline.name, inplace=True)
self.formatting = "number"

@ -0,0 +1,34 @@
import pandas as pd
from sqlalchemy import select
from .converters import dt_column_to_local_time, timestamp_to_utc
from .databases import db_engine, create_connection
def get_power_profiles(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_test")
connection, table = create_connection(engine, "ImbalancePrices")
start = start.floor("15T")
query = (
select([table])
.where(
table.columns.QuarterStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.QuarterStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.QuarterStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["QuarterStartTime"] = dt_column_to_local_time(data["QuarterStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(
columns={
"QuarterStartTime": "datetime",
"TakeFromGridPrice": "NEG",
"FeedToGridPrice": "POS",
},
inplace=True,
)
return data.set_index("datetime")[["POS", "NEG"]]

@ -0,0 +1,92 @@
from sqlalchemy import select
import pandas as pd
from .converters import dt_column_to_local_time, timestamp_to_utc
from .databases import db_engine, create_connection
def get_imbalance_prices(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_prices_test")
connection, table = create_connection(engine, "ImbalancePrices")
start = start.floor("15T")
query = (
select([table])
.where(
table.columns.QuarterStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.QuarterStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.QuarterStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["QuarterStartTime"] = dt_column_to_local_time(data["QuarterStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(
columns={
"QuarterStartTime": "datetime",
"TakeFromGridPrice": "NEG",
"FeedToGridPrice": "POS",
},
inplace=True,
)
return data.set_index("datetime")[["POS", "NEG"]]
def get_dayahead_prices(start, end, country, in_local_time=True):
start = timestamp_to_utc(start)
end = timestamp_to_utc(end)
engine = db_engine("rop_prices_test")
connection, table = create_connection(engine, "DayAheadPrices")
start = start.floor("60T")
query = (
select([table])
.where(
table.columns.HourStartTime >= start.strftime("%Y-%m-%d %H:%M"),
table.columns.HourStartTime < end.strftime("%Y-%m-%d %H:%M"),
table.columns.CountryIsoCode == country,
)
.order_by(table.columns.HourStartTime)
)
result = connection.execute(query).fetchall()
if len(result) == 0:
raise Exception("Day-ahead prices data not yet available.")
data = pd.DataFrame(result, columns=result[0].keys())
if in_local_time:
data["HourStartTime"] = dt_column_to_local_time(data["HourStartTime"])
data.drop(columns=["Id", "CountryIsoCode"], inplace=True)
data.rename(columns={"HourStartTime": "datetime", "Price": "DAM"}, inplace=True)
return data.set_index("datetime")
def get_market_price_data(start, end, country, in_local_time=True):
"""Combine day-ahead and imbalance prices on a single quarter-hourly index."""
tz = "Europe/Amsterdam" if in_local_time else "UTC"
dt_ix = pd.date_range(
start=start.floor("H"),
end=end.ceil("H"),
freq="15T",
tz=tz,
inclusive="left",
)
prices = pd.DataFrame(index=dt_ix, columns=["DAM", "POS", "NEG"])
prices["DAM"] = get_dayahead_prices(
start, end, country=country, in_local_time=in_local_time
).reindex(dt_ix, method="ffill")
prices["DAM"].fillna(method="ffill", inplace=True)
imbprices = get_imbalance_prices(
start, end, country=country, in_local_time=in_local_time
)
prices["POS"] = imbprices["POS"]
prices["NEG"] = imbprices["NEG"]
return prices
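# Minimal usage sketch, assuming the "rop_prices_test" database is reachable;
# get_market_price_data aligns hourly DAM prices with quarter-hourly POS/NEG
# prices on one 15-minute index. Dates and country are illustrative.
if __name__ == "__main__":
    market = get_market_price_data(
        start=pd.Timestamp("2022-01-01 00:00"),
        end=pd.Timestamp("2022-01-08 00:00"),
        country="NL",
    )
    print(market[["DAM", "POS", "NEG"]].describe())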

@ -0,0 +1,225 @@
import itertools
import gc
from copy import deepcopy
import warnings
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
from millify import millify
from plotly.graph_objs import Figure
from .casestudy import CaseStudy
from .colors import recoygreen, recoyred
class SensitivityAnalysis:
"""
Runs a simulation routine with different input configurations,
so that the sensitivity of output KPIs to an input parameter can be analysed.
"""
def __init__(self, c, s, routine, param, values, output_kpis):
self.configs = self._generate_configs(c, param, values)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, param, values):
configs = {}
for value in values:
_c = deepcopy(c)
setattr(_c, param, value)
configs[value] = _c
return configs
def _prepare_output_dict(self, cases, output_kpis):
output_dict = dict.fromkeys(self.configs.keys())
for value in self.configs:
output_dict[value] = dict.fromkeys(output_kpis)
for kpi in output_kpis:
output_dict[value][kpi] = dict.fromkeys([case.name for case in cases])
return output_dict
def _run_sensitivities(self, s, routine, output_kpis, output_dict):
for name, c in tqdm(self.configs.items()):
# Re-run the full routine on a fresh copy of the study for each config.
_s = deepcopy(s)
_s = routine(c, _s)
for kpi in output_kpis:
for case in _s.cases:
output_dict[name][kpi][case.name] = getattr(case, kpi, np.nan)
# Free the copied study explicitly; repeated deepcopies are memory-heavy.
del _s
gc.collect()
return output_dict
def single_kpi_overview(self, kpi, case_names=None):
"""Creates a DataFrame with chosen output kpi,
for each CaseStudy in each Configuration.
"""
if not case_names:
case_names = CaseStudy.instances.keys()
kpi_values = {
name: {case: self.kpis[name][kpi][case] for case in case_names}
for name in self.kpis.keys()
}
return pd.DataFrame(kpi_values).T
def cashflows_comparison(self, case=None, baseline=None):
ebitda_calc_overview = {}
baseline_calc = {}
for input_value, kpi_data in self.kpis.items():
for kpi, case_data in kpi_data.items():
for case_name, data in case_data.items():
if kpi == "cashflows":
if case_name == case:
ebitda_calc_overview[input_value] = data
if case_name == baseline:
baseline_calc[input_value] = data
ebitda_calc_overview = pd.DataFrame(ebitda_calc_overview)
if not baseline:
return ebitda_calc_overview
baseline_calc = pd.DataFrame(baseline_calc)
return ebitda_calc_overview.subtract(baseline_calc, fill_value=0)
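# Usage sketch (illustrative only): `c` is a config object, `s` a study holding
# CaseStudy objects, and `routine(c, s)` a callable that reruns the simulation
# and returns the updated study. Parameter names and values are assumptions.
#
#     sens = SensitivityAnalysis(
#         c, s, routine,
#         param="gas_price", values=[20, 30, 40],
#         output_kpis=["npv", "cashflows"],
#     )
#     sens.single_kpi_overview("npv")
#     sens.cashflows_comparison(case="E-boiler", baseline="Reference")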
class SensitivityMatrix:
def __init__(self, c, s, routine, x_param, y_param, x_vals, y_vals, output_kpis):
self.x_param = x_param
self.y_param = y_param
self.configs = self._generate_configs(c, x_vals, y_vals)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, x_vals, y_vals):
configs = {x_val: dict.fromkeys(y_vals) for x_val in x_vals}
self.xy_combinations = list(itertools.product(x_vals, y_vals))
for x_val, y_val in self.xy_combinations:
_c = deepcopy(c)
setattr(_c, self.x_param, x_val)
setattr(_c, self.y_param, y_val)
configs[x_val][y_val] = _c
return configs
def _prepare_output_dict(self, cases, output_kpis):
output_dict = {}
for name in [case.name for case in cases]:
output_dict[name] = dict.fromkeys(output_kpis)
for kpi in output_kpis:
output_dict[name][kpi] = deepcopy(self.configs)
return output_dict
def _run_sensitivities(self, s, routine, output_kpis, output_dict):
for x_val, y_val in tqdm(self.xy_combinations):
_c = self.configs[x_val][y_val]
_s = deepcopy(s)
_s = routine(_c, _s)
for kpi in output_kpis:
for case in _s.cases:
output = getattr(case, kpi, np.nan)
output_dict[case.name][kpi][x_val][y_val] = output
del _s
del _c
gc.collect()
return output_dict
def show_matrix(self, case_name, kpi):
"""
Creates a DataFrame with the chosen output KPI
for each (x, y) parameter combination.
"""
matrix = pd.DataFrame(self.kpis[case_name][kpi])
matrix.columns.name = self.x_param
matrix.index.name = self.y_param
return matrix
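# Usage sketch (illustrative only): sweep two parameters jointly and view one
# KPI as an x-by-y matrix. Parameter names, values, and case name are assumptions.
#
#     matrix = SensitivityMatrix(
#         c, s, routine,
#         x_param="gas_price", y_param="power_price",
#         x_vals=[20, 30], y_vals=[50, 100],
#         output_kpis=["npv"],
#     )
#     matrix.show_matrix("E-boiler", "npv")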
class ScenarioAnalysis(SensitivityAnalysis):
def __init__(self, c, s, routine, params_dict, labels, output_kpis):
self.labels = labels
self.configs = self._generate_configs(c, params_dict, labels)
output_dict = self._prepare_output_dict(s.cases, output_kpis)
self.kpis = self._run_sensitivities(s, routine, output_kpis, output_dict)
def _generate_configs(self, c, params_dict, labels):
configs = {}
for i, label in enumerate(labels):
_c = deepcopy(c)
for param, values in params_dict.items():
setattr(_c, param, values[i])
configs[label] = _c
return configs
class TornadoChart:
"""
Runs Low/Medium/High sensitivities per input variable and charts each
variable's relative impact on a KPI against the Medium (base) run.
TODO: Absolute comparison instead of relative
"""
def __init__(self, c, s, routine, case, tornado_vars, output_kpis):
self.case = case
self.kpis = self._run_sensitivities(
c, s, routine, case, tornado_vars, output_kpis
)
def _run_sensitivities(self, c, s, routine, case, tornado_vars, output_kpis):
labels = ["Low", "Medium", "High"]
outputs = {kpi: pd.DataFrame(index=labels) for kpi in output_kpis}
for param, values in tornado_vars.items():
sens = SensitivityAnalysis(c, s, routine, param, values, output_kpis)
for kpi in output_kpis:
output = sens.single_kpi_overview(kpi, case_names=[case.name])[
case.name
]
output.index = labels
outputs[kpi][" ".join((param, str(values)))] = output
for kpi in output_kpis:
base_performance = deepcopy(outputs[kpi].loc["Medium", :])
for scen in labels:
scen_performance = outputs[kpi].loc[scen, :]
relative_performance = (scen_performance / base_performance - 1) * 100
outputs[kpi].loc[scen, :] = relative_performance
outputs[kpi] = outputs[kpi].round(1)
outputs[kpi].sort_values(by="Low", axis=1, ascending=False, inplace=True)
return outputs
def show_chart(
self, kpi, dimensions=(800, 680), title="Tornado Chart", sort_by="Low"
):
outputs = self.kpis[kpi].sort_values(by=sort_by, axis=1, ascending=False)
traces = []
colors = {"Low": recoyred, "High": recoygreen}
for scenario in ["Low", "High"]:
trace = {
"type": "bar",
"x": outputs.loc[scenario, :].tolist(),
"y": outputs.columns,
"orientation": "h",
"name": scenario,
"marker": {"color": colors[scenario]},
}
traces.append(trace)
layout = {
"title": title,
"width": dimensions[0],
"height": dimensions[1],
"barmode": "relative",
"autosize": True,
"showlegend": True,
}
fig = Figure(data=traces, layout=layout)
fig.update_xaxes(
title_text=f"{kpi.upper()} % change compared to base scenario (Base {kpi.upper()} = {millify(getattr(self.case, kpi))})"
)
return fig
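# Usage sketch (illustrative only): tornado_vars maps each input parameter to
# its [Low, Medium, High] values; the chart shows the % change of the KPI per
# parameter relative to the Medium run. Names and values below are assumptions.
#
#     chart = TornadoChart(
#         c, s, routine, case,
#         tornado_vars={"gas_price": [20, 30, 40], "capex": [0.8e6, 1e6, 1.2e6]},
#         output_kpis=["npv"],
#     )
#     chart.show_chart("npv")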

@ -0,0 +1,47 @@
from copy import deepcopy
from numbers import Number
import numpy as np
def num_formatting(val):
if np.isnan(val) or round(val, 0) == 0:
return "-"
else:
return f"{val:,.0f}"
def perc_formatting(val):
if np.isnan(val) or round(val, 0) == 0:
return "-"
else:
return f"{val:.1f}%"
def bc_formatting(val):
if not isinstance(val, Number):
return val
if np.isnan(val):
return ""
elif round(val, 2) == 0:
return "-"
else:
return f"{val:,.0f}"
def businesscase_formatter(df):
df_c = deepcopy(df)
spp = df_c.loc["Simple Payback Period", "Year 0"]
spp_str = "N/A" if np.isnan(spp) else str(spp) + " years"
df_c.loc["Simple Payback Period", "Year 0"] = spp_str
irr = df_c.loc["IRR (%)", "Year 0"]
if np.isnan(irr):
df_c.loc["IRR (%)", "Year 0"] = "N/A"
df_c = df_c.applymap(bc_formatting)
if not np.isnan(irr):
df_c.loc["IRR (%)", "Year 0"] += "%"
df_c.loc["WACC (%)", "Year 0"] += "%"
return df_c
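# Minimal self-contained sketch of the cell formatters; the values and the
# expected outputs in the comments are illustrative only.
if __name__ == "__main__":
    import pandas as pd

    s = pd.Series([1234.5, 0.0, np.nan])
    print(s.apply(num_formatting).tolist())   # ['1,234', '-', '-']
    print(perc_formatting(12.34))             # '12.3%'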

@ -0,0 +1,29 @@
from setuptools import setup
# To install, run `pip install -e pyrecoy` from the parent directory.
setup(
name="pyrecoy",
version="0.1",
description="Private package containing utils for flexible power system modelling on energy markets.",
url="#",
author="mekremer",
author_email="kremer@recoy.com",
license="",
packages=["pyrecoy"],
install_requires=[
"requests",
"pandas",
"numpy",
"entsoe-py",
"numpy-financial",
"scipy",
"plotly",
"tqdm",
"millify",
"bs4",
"xmltodict",
"openpyxl",
],
zip_safe=False,
)

File diff suppressed because one or more lines are too long

@ -0,0 +1,324 @@
import pytest
from pyrecoy import assets
from numpy.polynomial import Polynomial
class TestAsset:
@pytest.fixture()
def asset(self):
return assets.Asset(
name="TestAsset", max_power=2, min_power=-2, default_load=1, idle_load=0.2
)
def test_init(self, asset):
assert asset.max_power == 2
assert asset.min_power == -2
assert asset.default_load == 1
assert asset.idle_load == 0.2
@pytest.mark.skip(reason="Not implemented.")
def test_set_load(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_set_freq(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_MW_to_MWh(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_MWh_to_MW(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_set_financials(self):
raise NotImplementedError
class TestEboiler:
@pytest.fixture()
def eboiler(self):
return None
@pytest.mark.skip(reason="Not implemented.")
def test_init(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_set_load(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_set_heat_output(self):
raise NotImplementedError
class TestHeatpump:
@pytest.fixture
def hp_fixed(self):
return assets.Heatpump(
name="Heatpump w/ Fixed COP", max_th_power=4, cop_curve=2
)
@pytest.fixture
def hp_poly(self):
return assets.Heatpump(
name="Heatpump w/ Polynomial COP", max_th_power=4, cop_curve=[4, -2]
)
@pytest.fixture
def hp_func(self):
def func(Tsource, Tsink):
Tsink += 273
Tsource += 273
c1 = 0.267 * Tsink / (Tsink - Tsource)
c2 = 0.333 * Tsink / (Tsink - Tsource)
return Polynomial([c2, c1])
return assets.Heatpump(
name="Heatpump w/ Func COP", max_th_power=4, cop_curve=func
)
def test_init(self, hp_fixed, hp_poly, hp_func):
assert hp_fixed.max_th_power == 4
assert hp_fixed.min_th_power == 0
assert callable(hp_fixed.cop_curve)
assert callable(hp_poly.cop_curve)
assert callable(hp_func.cop_curve)
def test_get_cop(self, hp_fixed, hp_poly, hp_func):
assert hp_fixed.get_cop(load=4) == 2
assert hp_fixed.get_cop(load=4, Tsink=100, Tsource=10) == 2
assert hp_fixed.get_cop(load=0) == 2
assert hp_fixed.get_cop(load=2) == 2
assert hp_fixed.get_cop(load=1.25) == 2
assert hp_poly.get_cop(load=0) == 4
assert hp_poly.get_cop(load=2) == 3
assert hp_poly.get_cop(load=4) == 2
with pytest.raises(ValueError):
hp_fixed.get_cop(load=5)
with pytest.raises(ValueError):
hp_fixed.get_cop(load=-1)
assert round(hp_func.get_cop(load=3, Tsink=110, Tsource=20), 2) == 2.27
def test_th_to_el_power(self, hp_fixed, hp_poly, hp_func):
assert hp_fixed._th_to_el_power(p_th=4) == -2
assert hp_fixed._th_to_el_power(p_th=2) == -1
assert hp_fixed._th_to_el_power(p_th=0) == 0
assert hp_poly._th_to_el_power(p_th=2) == -2 / 3
assert round(
hp_func._th_to_el_power(p_th=3, Tsink=110, Tsource=20), 2
) == round(-3 / 2.27, 2)
def test_set_load(self, hp_fixed):
with pytest.raises(NotImplementedError):
hp_fixed.set_load(3)
def test_set_heat_output(self, hp_fixed, hp_poly, hp_func):
with pytest.raises(ValueError):
hp_fixed.set_heat_output(th_output=-1)
with pytest.raises(ValueError):
hp_fixed.set_heat_output(th_output=5)
with pytest.raises(ValueError):
hp_fixed.set_heat_output(th_output=5, Tsink=20, Tsource=100)
assert hp_fixed.set_heat_output(th_output=2) == 2
assert hp_fixed.set_heat_output(th_output=2, Tsink=100, Tsource=10) == 2
assert hp_fixed.set_heat_output(th_output=2, return_eload=True) == (2, -1)
assert hp_fixed.get_heat_output() == 2
assert hp_poly.set_heat_output(th_output=2, return_eload=True) == (2, -2 / 3)
assert hp_poly.get_heat_output() == 2
assert hp_func.set_heat_output(th_output=2, Tsink=110, Tsource=20) == 2
def test_cost_function(self, hp_fixed):
assert hp_fixed._cost_function(x=2, c1=40, c2=20, c3=4) == 40 + 40
assert hp_fixed._cost_function(x=2, c1=40, c2=20, c3=5) == 40 + 60
def test_set_opt_load(self, hp_fixed, hp_poly, hp_func):
assert round(hp_fixed.set_opt_load(50, 20, 4), 2) == 0
assert round(hp_fixed.set_opt_load(30, 20, 4), 2) == -2
assert round(hp_fixed.set_opt_load(30, 20, 5), 2) == -2
e_load, th_load = hp_fixed.set_opt_load(30, 20, 5, return_th_load=True)
assert round(e_load, 2) == -2
assert round(th_load, 2) == 4
assert round(hp_poly.set_opt_load(20, 20, 4), 2) == -2
assert round(hp_poly.set_opt_load(30, 20, 4), 2) == -1.27
assert round(hp_poly.set_opt_load(40, 20, 4), 2) == -0.83
assert round(hp_poly.set_opt_load(50, 20, 4), 2) == -0.53
assert round(hp_poly.set_opt_load(60, 20, 4), 2) == -0.31
assert round(hp_poly.set_opt_load(70, 20, 4), 2) == -0.14
assert round(hp_poly.set_opt_load(80, 20, 4), 2) == 0
assert round(hp_poly.set_opt_load(100, 20, 4), 2) == 0
assert round(hp_func.set_opt_load(0, 20, 4, Tsink=110, Tsource=20), 2) == -1.57
assert round(hp_func.set_opt_load(30, 20, 4, Tsink=110, Tsource=20), 2) == -1.57
assert round(hp_func.set_opt_load(100, 20, 4, Tsink=110, Tsource=20), 2) == 0
class TestBattery:
@pytest.fixture
def battery(self):
return assets.Battery(
name="Battery",
rated_power=2,
rated_capacity=2,
roundtrip_eff=0.9,
min_soc=0.2,
max_soc=0.8,
)
def test_init(self, battery):
assert battery.max_power == 2
assert battery.min_power == -2
assert battery.min_soc == 0.2
assert battery.min_chargelevel == 0.4
assert battery.max_soc == 0.8
assert battery.max_chargelevel == 1.6
assert battery.soc == battery.min_soc
assert battery.rt_eff == 0.9
def test_set_soc(self, battery):
battery.set_soc(0.5)
assert battery.get_soc() == 0.5
assert battery.get_chargelevel() == 1
with pytest.raises(ValueError):
battery.set_soc(0.9)
with pytest.raises(ValueError):
battery.set_soc(1.1)
with pytest.raises(ValueError):
battery.set_soc(0.1)
with pytest.raises(ValueError):
battery.set_soc(-0.1)
def test_set_chargelevel(self, battery):
battery.set_chargelevel(1)
assert battery.get_soc() == 0.5
assert battery.get_chargelevel() == 1
battery.set_chargelevel(1.6)
assert battery.get_chargelevel() == 1.6
assert battery.get_chargelevel() == battery.max_chargelevel
assert battery.get_soc() == battery.max_soc
with pytest.raises(ValueError):
battery.set_chargelevel(2)
with pytest.raises(ValueError):
battery.set_chargelevel(1.9)
with pytest.raises(ValueError):
battery.set_chargelevel(0)
with pytest.raises(ValueError):
battery.set_chargelevel(-1)
def test_set_load(self, battery):
battery.set_freq("15T")
battery.set_soc(0.5)
assert round(battery.set_load(2), 4) == 2
assert round(battery.get_load(), 4) == 2
assert round(battery.get_soc(), 4) == 0.25
assert round(battery.get_chargelevel(), 4) == 0.5
battery.set_soc(0.5)
assert round(battery.set_load(-2), 4) == -2
assert round(battery.get_load(), 4) == -2
assert round(battery.get_soc(), 4) == round(0.5 + 0.25 * 0.9, 4)
assert round(battery.get_chargelevel(), 4) == round(1 + 0.5 * 0.9, 4)
battery.set_soc(0.25)
assert round(battery.set_load(2), 4) == 0.4
assert round(battery.get_load(), 4) == 0.4
assert round(battery.get_soc(), 4) == 0.2
assert round(battery.get_chargelevel(), 4) == 0.4
battery.set_soc(0.75)
assert round(battery.set_load(-2), 4) == round(-0.4 / 0.9, 4)
assert round(battery.get_load(), 4) == round(-0.4 / 0.9, 4)
assert round(battery.get_soc(), 4) == 0.8
assert round(battery.get_chargelevel(), 4) == 1.6
battery.set_soc(0.5)
with pytest.warns(UserWarning):
assert round(battery.set_load(3), 4) == 2
battery.set_soc(0.5)
with pytest.warns(UserWarning):
assert round(battery.set_load(-3), 4) == -2
battery.set_soc(0.5)
assert round(battery.set_load("max"), 4) == 2
battery.set_soc(0.5)
assert battery.set_load("min") == -2
def test_charge(self, battery):
battery.set_freq("15T")
battery.set_soc(0.5)
assert battery.charge(2) == -2
assert battery.get_load() == -2
assert battery.get_soc() == round(0.5 + 0.25 * 0.9, 4)
with pytest.raises(ValueError):
battery.charge(-1)
def test_discharge(self, battery):
battery.set_freq("15T")
battery.set_soc(0.5)
assert battery.discharge(2) == 2
assert battery.get_load() == 2
assert battery.get_soc() == 0.25
with pytest.raises(ValueError):
battery.discharge(-1)
class TestGasBoiler:
@pytest.fixture()
def gasboiler(self):
return assets.GasBoiler(name="Gasboiler", max_th_output=10, efficiency=0.90)
def test_init(self, gasboiler):
assert gasboiler.max_power == 10
assert gasboiler.min_power == 0
assert gasboiler.efficiency == 0.9
def test_get_load(self, gasboiler):
with pytest.raises(NotImplementedError):
gasboiler.get_load()
def test_set_load(self, gasboiler):
with pytest.raises(NotImplementedError):
gasboiler.set_load(5)
def test_set_heat_output(self, gasboiler):
assert gasboiler.set_heat_output(5) == 5
assert gasboiler.get_heat_output() == 5
assert gasboiler.set_heat_output("max") == 10
assert gasboiler.set_heat_output("min") == 0
def test_get_gas_cons(self, gasboiler):
gasboiler.set_heat_output(5)
assert gasboiler.get_gas_cons() == -5 / gasboiler.efficiency

@ -0,0 +1,61 @@
import pytest
from pyrecoy.financial import *
@pytest.mark.skip(reason="Not implemented.")
def test_calc_energy_market_results():
raise NotImplementedError
@pytest.mark.parametrize(
"inputs, exp_output",
[
({"cons": 1, "year": 2020, "base_cons": 0}, -125),
({"cons": 10, "year": 2020, "base_cons": 0}, -1250),
({"cons": 100, "year": 2020, "base_cons": 0}, -6484.7),
({"cons": 1000, "year": 2020, "base_cons": 0}, -37_111.7),
({"cons": 100_000, "year": 2020, "base_cons": 0}, -428_881.7),
({"cons": 100, "year": 2020, "base_cons": 100_000}, -95),
({"cons": 100_000, "year": 2020, "tax_bracket": 1}, -428_881.7),
({"cons": 100_000, "year": 2020, "tax_bracket": 2}, -427_641.2),
({"cons": 100_000, "year": 2020, "tax_bracket": 3}, -424_146),
({"cons": 100_000, "year": 2020, "tax_bracket": 4}, -95_000),
(
{"cons": 100_000, "electr": False, "year": 2020, "base_cons": 100},
-547_302.20,
),
(
{
"cons": 100_000,
"electr": False,
"year": 2020,
"base_cons": 0,
"m3": True,
},
-41_057,
),
(
{
"cons": 100_000,
"electr": False,
"year": 2020,
"base_cons": 0,
"horti": True,
"m3": True,
},
-6_588,
),
],
)
def test_calculate_eb_ode(inputs, exp_output):
assert calculate_eb_ode(**inputs) == exp_output
@pytest.mark.skip(reason="Not implemented.")
def test_income_tax():
raise NotImplementedError
@pytest.mark.skip(reason="Not implemented.")
def test_calc_business_case():
raise NotImplementedError