diff --git a/docs/source/_static/mathematics/linear/converter.png b/docs/source/_static/mathematics/linear/converter.png new file mode 100644 index 0000000..921bf79 Binary files /dev/null and b/docs/source/_static/mathematics/linear/converter.png differ diff --git a/docs/source/architecture/analyzer.rst b/docs/source/architecture/analyzer.rst index c60c77c..b403698 100644 --- a/docs/source/architecture/analyzer.rst +++ b/docs/source/architecture/analyzer.rst @@ -1,13 +1,13 @@ Analyzer ======== -For a high abstraction and to be agnostic about technology, Hadar uses objects as glue for optimizer. Objects are cool, but are too complicated to manipulated for data analysis. Analyzer contains tools to help analyzing study result. +For a high abstraction and to be agnostic about technology, Hadar uses objects as glue for optimizer. Objects are cool, but are too complicated to manipulated for data analysis. Analyzer contains tools to help analyzing study and result. Today, there is only :code:`ResultAnalyzer`, with two features level: * **high level** user asks directly to compute global cost and global remain capacity, etc. -* **low level** user asks *raw* data represented inside pandas Dataframe. +* **low level** user build query and get *raw* data represented inside pandas Dataframe. Before speaking about this features, let's see how data are transformed. @@ -16,26 +16,26 @@ Flatten Data As said above, object is nice to encapsulate data and represent it into agnostic form. Objects can be serialized into JSON or something else to be used by another software maybe in another language. But keep object to analyze data is awful. -Python has a very efficient tool for data analysis : pandas. Therefore challenge is to transform object into pandas Dataframe. Solution used is to flatten data to fill into table. +Python has a very efficient tool for data analysis : pandas. Therefore challenge is to transform object into pandas Dataframe. 
Solution is to flatten data to fill into table. Consumption *********** For example with consumption. Data into :code:`Study` is cost and asked quantity. And in :code:`Result` it's cost (same) and given quantity. This tuple *(cost, asked, given)* is present for each node, each consumption attached on this node, each scenario and each timestep. If we want to flatten data, we need to fill this table -+------+------+------+------+------+------+------+ -| cost | asked| given| node | name | scn | t | -+------+------+------+------+------+------+------+ -| 10 | 5 | 5 | fr | load | 0 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 7 | 7 | fr | load | 0 | 1 | -+------+------+------+------+------+------+------+ -| 10 | 7 | 5 | fr | load | 1 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 6 | 6 | fr | load | 1 | 1 | -+------+------+------+------+------+------+------+ -| ... | ... | ... | ... | ... | .. | ... | -+------+------+------+------+------+------+------+ ++------+------+------+------+------+------+------+------------+ +| cost | asked| given| node | name | scn | t | network | ++------+------+------+------+------+------+------+------------+ +| 10 | 5 | 5 | fr | load | 0 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 7 | 7 | fr | load | 0 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 7 | 5 | fr | load | 1 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 6 | 6 | fr | load | 1 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | .. | ... | ... | ++------+------+------+------+------+------+------+------------+ It is the purpose of :code:`_build_consumption(study: Study, result: Result) -> pd.Dataframe` to build this array @@ -44,45 +44,107 @@ Production Production follow the same pattern. 
However, they don't have *asked* and *given* but *available* and *used* quantity. Therefore table looks like -+------+------+------+------+------+------+------+ -| cost | avail| used | node | name | scn | t | -+------+------+------+------+------+------+------+ -| 10 | 100 | 21 | fr | coal | 0 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 36 | fr | coal | 0 | 1 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 12 | fr | coal | 1 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 81 | fr | coal | 1 | 1 | -+------+------+------+------+------+------+------+ -| ... | ... | ... | ... | ... | .. | ... | -+------+------+------+------+------+------+------+ ++------+------+------+------+------+------+------+------------+ +| cost | avail| used | node | name | scn | t | network | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 21 | fr | coal | 0 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 36 | fr | coal | 0 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 12 | fr | coal | 1 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 81 | fr | coal | 1 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | .. | ... | ... | ++------+------+------+------+------+------+------+------------+ It's done by :code:`_build_production(study: Study, result: Result) -> pd.Dataframe` method. +Storage +******* + +Storage follow the same pattern. Therefore table looks like. 
+ ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +|max_capacity | capacity | max_flow_in | flow_in | max_flow_out | flow_out | cost | init_capacity | eff | node | name | scn | t | network | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +| 12000 | 678 | 400 | 214 | 400 | 0 | 10 | 0 | .99 | fr | cell | 0 | 0 | default | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +| 12000 | 892 | 400 | 53 | 400 | 0 | 10 | 0 | .99 | fr | cell | 0 | 1 | default | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +| 12000 | 945 | 400 | 0 | 400 | 87 | 10 | 0 | .99 | fr | cell | 1 | 0 | default | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +| 12000 | 853 | 400 | 0 | 400 | 0 | 10 | 0 | .99 | fr | cell | 1 | 1 | default | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | .. | ... | ... | ++-------------+----------+-------------+---------+--------------+----------+------+---------------+-----+------+------+------+------+------------+ + + +It's done by :code:`_build_storage(study: Study, result: Result) -> pd.Dataframe` method. + + Link **** Link follow the same pattern. Hierarchical structure naming change. There are not *node* and *name* but *source* and *destination*. Therefore table looks like. 
-+------+------+------+------+------+------+------+ -| cost | avail| used | src | dest | scn | t | -+------+------+------+------+------+------+------+ -| 10 | 100 | 21 | fr | uk | 0 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 36 | fr | uk | 0 | 1 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 12 | fr | uk | 1 | 0 | -+------+------+------+------+------+------+------+ -| 10 | 100 | 81 | fr | uk | 1 | 1 | -+------+------+------+------+------+------+------+ -| ... | ... | ... | ... | ... | .. | .. | -+------+------+------+------+------+------+------+ ++------+------+------+------+------+------+------+------------+ +| cost | avail| used | src | dest | scn | t | network | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 21 | fr | uk | 0 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 36 | fr | uk | 0 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 12 | fr | uk | 1 | 0 | default | ++------+------+------+------+------+------+------+------------+ +| 10 | 100 | 81 | fr | uk | 1 | 1 | default | ++------+------+------+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | .. | .. | ... | ++------+------+------+------+------+------+------+------------+ It's done by :code:`_build_link(study: Study, result: Result) -> pd.Dataframe` method. +Converter +********* + +Converter follow the same pattern, it just split in two tables. 
One for source element: + ++-----+-------+------+------+------+------+------+------------+ +| max | ratio | flow | node | name | scn | t | network | ++-----+-------+------+------+------+------+------+------------+ +| 100 | .4 | 52 | fr | conv | 0 | 0 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | .4 | 87 | fr | conv | 0 | 1 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | .4 | 23 | fr | conv | 1 | 0 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | .4 | 58 | fr | conv | 1 | 1 | default | ++-----+-------+------+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | .. | ... | ... | ++-----+-------+------+------+------+------+------+------------+ + +It's done by :code:`_build_src_converter(study: Study, result: Result) -> pd.Dataframe` method. + +And an other for destination element, tables are near identical. Source has special attributes called *ratio* and destintion has special attribute called *cost*: + ++-----+-------+------+------+------+------+------+------------+ +| max | cost | flow | node | name | scn | t | network | ++-----+-------+------+------+------+------+------+------------+ +| 100 | 20 | 52 | fr | conv | 0 | 0 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | 20 | 87 | fr | conv | 0 | 1 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | 20 | 23 | fr | conv | 1 | 0 | default | ++-----+-------+------+------+------+------+------+------------+ +| 100 | 20 | 58 | fr | conv | 1 | 1 | default | ++-----+-------+------+------+------+------+------+------------+ +| ... | ... | ... | ... | ... | .. | ... | ... | ++-----+-------+------+------+------+------+------+------------+ + +It's done by :code:`_build_dest_converter(study: Study, result: Result) -> pd.Dataframe` method. 
+ Low level analysis power with a *FluentAPISelector* --------------------------------------------------- @@ -165,39 +227,3 @@ Unlike low level, high level focus on provides ready to use data. Unlike low lev * :code:`get_cost(self, node: str) -> np.ndarray:` method which according to node given returns a matrix (scenario, horizon) shape with summarize cost. * :code:`get_balance(self, node: str) -> np.ndarray` method which according to node given returns a matrix (scenario, horizon) shape with exchange balance (i.e. sum of exportation minus sum of importation) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -j diff --git a/docs/source/architecture/optimizer.rst b/docs/source/architecture/optimizer.rst index 3688640..1034b09 100644 --- a/docs/source/architecture/optimizer.rst +++ b/docs/source/architecture/optimizer.rst @@ -17,7 +17,7 @@ Today, two optimizers are present :code:`LPOptimizer` and :code:`RemoteOptimizer RemoteOptimizer --------------- -Let's start by the simplest. :code:`RemoteOptimizer` is a client to hadar server. As you may know Hadar exist like a python library, but has also a tiny project to package hadar inside web server. You can find more details on this server in this `repository. `_ +Let's start by the simplest. :code:`RemoteOptimizer` is a client to hadar server. As you may know Hadar exist like a python library, but has also a tiny project to package hadar inside web server. You can find more details on this server in this `repository. `_ Client implements :code:`Optimizer` interface. Like that, to deploy compute on a data-center, only one line of code changes. :: @@ -41,7 +41,7 @@ Analyze that in details. InputMapper ************ -If you look in code, you will see two domains. One at :code:`hadar.optimizer.[input/output]` and another at :code:`hadar.optimizer.lp.domain` . If you look carefully it seems the same :code:`Consumption` , :code:`OutputConsumption` in one hand, :code:`LPConsumption` in other hand. 
The only change is a new attribute in :code:`LP*` called :code:`variable` . Variables are the parameters of the problem. It's what or-tools has to find, i.e. power used for production, capacity used for border and lost of load for consumption. +If you look in code, you will see three domains. One at :code:`hadar.optimizer.input`, :code:`hadar.optimizer.output` and another at :code:`hadar.optimizer.lp.domain` . If you look carefully it seems the same :code:`Consumption` , :code:`OutputConsumption` in one hand, :code:`LPConsumption` in other hand. The only change is a new attribute in :code:`LP*` called :code:`variable` . Variables are the parameters of the problem. It's what or-tools has to find, i.e. power used for production, capacity used for border and lost of load for consumption. Therefore, :code:`InputMapper` roles are just to create new object with ortools Variables initialized, like we can see in this code snippet. :: @@ -58,7 +58,7 @@ Therefore, :code:`InputMapper` roles are just to create new object with ortools OutputMapper ************ -At the end, :code:`OutputMapper` does the reverse thing. :code:`LP*` objects have computed :code:`Variables`. We need to extract result find by or-tool to :code:`Result` object. +At the end, :code:`OutputMapper` does the reverse thing. :code:`LP*` objects have computed :code:`Variables`. We need to extract result found by or-tool to :code:`Result` object. Mapping of :code:`LPProduction` and :code:`LPLink` are straight forward. I propose you to look at :code:`LPConsumption` code :: @@ -79,6 +79,10 @@ Hadar has to build problem optimization. These algorithms are encapsulated insid :code:`ObjectiveBuilder` takes node by its method :code:`add_node`. Then for all productions, consumptions, links, it adds :math:`variable * cost` into objective equation. +:code:`StorageBuilder` build constraints for each storage element. Constraints care about a strict volume integrity (i.e. 
volume is the sum of last volume + input - output) + +:code:`ConverterBuilder` build ratio constraints between each inputs converter to output. + :code:`AdequacyBuilder` is a bit more tricky. For each node, it will create a new adequacy constraint equation (c.f. :ref:`Linear Model `). Coefficients, here are 1 or -1 depending of *inner* power or *outer* power. Have you seen these line ? :: self.constraints[(t, link.src)].SetCoefficient(link.variable, -1) # Export from src @@ -136,13 +140,15 @@ It should work, but in fact not... I don't know why, when multiprocessing want t Study ----- -:code:`Study` is a *API object* I means it encapsulates all data needed to compute adequacy. It's the glue between workflow (or any other preprocessing) and optimizer. Study has an hierarchical structure of 3 levels : +:code: Study` is a *API object* I means it encapsulates all data needed to compute adequacy. It's the glue between workflow (or any other preprocessing) and optimizer. Study has an hierarchical structure of 3 levels : + +#. study level with set of networks and converter (:code:`Converter`) -#. node level with node name as key. +#. network level (:code:`InputNetwork`) with set of nodes. -#. type elements level with *consumption*, *production* and *link* entries. Represented by :code:`InputNode` object. +#. node level (:code:`InputNode`) with set of consumptions, productions, storages and links elements. -#. element with index as key. Represented by :code:`Consumption`, :code:`Production`, :code:`Link` objects +#. element level (:code:`Consumption`, :code:`Production`, :code:`Storage`, :code:`Link`). According to element type, some attributes are numpy 2D matrix with shape(nb_scn, horizon) Most important attribute could be :code:`quantity` which represent quantity of power used in network. For link, is a transfert capacity. For production is a generation capacity. For consumption is a forced load to sustain. 
@@ -175,9 +181,9 @@ In the case of optimizer, *Fluent API Selector* is represented by :code:`Network * You can only downstream deeper step by step (i.e. :code:`network()` then :code:`node()`, then :code:`consumption()` ) -* But you can upstream as you want (i.e. go direcly from :code:`consumption()` to :code:`network()` ) +* But you can upstream as you want (i.e. go direcly from :code:`consumption()` to :code:`network()` or :code:`converter()` ) -To help user, quantity field is flexible: +To help user, quantity and cost fields are flexible: * lists are converted to numpy array diff --git a/docs/source/graphics.drawio b/docs/source/graphics.drawio index c3c20d7..006b480 100644 --- a/docs/source/graphics.drawio +++ b/docs/source/graphics.drawio @@ -1 +1 @@ -7Ztbc5s4FMc/jR+TQciAeUy8STud7Ux3M7vt9k0LslEjIyqEL/vpVzIXg8D1JTJxJngyEzjIR5f/T0dCkkdwulh/4CiJPrMQ05FthesR/G1k275tj9SfFW5yg+dYuWHOSZibwM7wRP7DhbFMlpEQp42EgjEqSNI0BiyOcSAaNsQ5WzWTzRht5pqgOW4ZngJE29avJBRRbp3Y3s7+EZN5VOYMXD9/skBl4qImaYRCtqqZ4MMITjljIr9arKeYqrYr2yX/3uOep1XBOI7FMV94iP+OQLYCzys4IZPk21fnr+VN4WWJaFZUuCis2JQtgOPwTjWkvAsoSlMSjOB9JBZUGoC8TAVnz1XjyGrd5y5w2GrZXVFB1QASHMwWWPCNTLLaNXHJSVRr3dLGMUWCLJvuUaH0vHJX5fCFEZmxbRVQAq/wUzA5Lv2WLlKW8QAX36o36SFHmh+B+ByLlh95Uav1zrQV7ATx7Hcp3rjZ5tA9UzzPP+Dowuo5LfX+xAtEYmmbogQFRBAZ+HRBBV6LLgmnjDIuLTGLZcr7GaFUMyFK5rG8pXimPCwxF0TGubvCvCBhqDK5X0VE4CdZAJXjSgZ1aeMsi0OsamMp5ywWRaQGToWMcojXp0JTfkEXo7itMeV2MAWt/fg09DpVHPdw1woyvtw2iRKh1s+K5q4pVG8vN0/8SFR58tasSTWy4Ww2s4Ogpat8Err/uo7b6rTjt9BptUBp+4Y6beX4QKeV4qBNLVmiEqT7C1zRVuTj+NYvy1UNIWW5xo308iIvgdEIMumA1KWqc5MGqu7PTE0ythzepFsQ72QC4CTrLTvlc3k1L/7TMv2LHKEQ/8xQsCkdylrmPpv5SDPZ2YwHvED2AcyvLuRN9pBc65FeR4+0LxXyyhn39eJEUfAsn7OZqv5VoMVzXa6bLOi/Nlnjw6NpbQBlMl1TlhClUTXUqpsvSMguHW8tEpjOiaw+6qr72mg6lR/LaozG4C2MpLq4505/W450+S88/QXt+e8AxZlQyPnHrWMGC1mmhqNx31gcMfMesDgSC9dQrLBd0ISi51dl4A1QGIPi0LLH0VD4rwxF1+vPAMVZUEBoCAoINSj6Hj78AQpjUHimoNAd9b1O3vVmO0BxFhRjYAgK3VH11toXFKdtfQ1Q/AoKxxQUmqPed9TsAQpTUDiWISh0R9XZgL6ggAMUxqAwtXurOwL6jtKFoYBtKEicZOLd7Nh6upId+xe9bt
nC9irzZ1ltfDNFnDKlSYBjxMn2Ei0SSuL5tYtlV/dFEYEB5fRB1m0LV70P9qPcsBRsLsBqa37A8m89x699zou3LjjN76XDb9cy8SmblXDPHmMZJNK3uYMIzQT3cSlnpXY7RnRBfLEtRNi1AmxC7zkL5eNZFgeCsPh9i64t0wC7Lbrn9yn6aSu8r3HwqinKmxscbBnEXUMDQoevkw9jvWCY+JA8/niKwafv8NPHH/9Y3//43RI3w5ubOXZMHTzQHYHLLfx1MjEcRjHGhGsbYkJ3dOyxTlNMDC8g5pgwdehAd9Q3E8NBFGNMeKaWgnVHfTMxnEMxxwT0b8tzPS/Gou3LHBnydvcjvTz57peO8OF/7Ztfj+I2EMA/DY89JXH+Ph7Z46r2Wq1E1d49nazEkFxDjByzQD99HWKTxM4B264PW4IHFE9sJ57fDB57zAykm8NHArfFbzhH1cxz8sMMPM08LwIR+24Fx04QOk4nWJMy70RuL1iW/yAuFNV2ZY6aUUWKcUXL7ViY4bpGGR3JICF4P662wtX4qVu4RopgmcFKlf5V5rTgUjdM+hs/o3Jd8EfHHh/wBorKfCRNAXO8H4jAhxlICca0u9ocUlS1uhN6+fLnH8m39S/xDoW/fi2a56/k+OmnrrPFa5qch0BQTd+2a6/r+gVWO64vPlZ6FAokeFfnqO3EmYH5vigpWm5h1t7dM4thsoJuKlZy2eWqrKoUV5ic2gKHfZKkleOacttwPV4e1EvZx2m7h1W5rpmMdEDmDSX4byRq1rhmPczzkjBLKXFbscG7FtOcjwMRig4S+Ctac88omQsgvEGUHFk73osv6HPzBzEv7wfGxEXFwIxc4SaQ2+/63HXPiF1wTK9ABnQjAyAMrUbmm4bM140sDNPUamShacgCvciSJE0XC6uRxaYhC3Uja2czm5EFQWAYskgvssUiDO2ey4LINGTiaTonMwBsZhYGpv0yuq5uZotFbPVPY5gYx0z70oyzsJdZZBwzzWuz+WlGs5lZ5JrGzFOZfcIwV7gxRdAxnEllDolxkYCQMcUhJp+3ai0zWL3nNzZlnrePmbSGsb0MwccqeMfhMaoufr4j8QNT/CYAetr4qYvrFMPqwW86ngTX+cU/FJ+60G5wBcmD3/ScJ+9t3d//Yt1xiu2bW6Hkc0F49zkv0cvM/q0SeQ13f2bCiB6x5a3rAQOYac/V2J5eAyAwjZn2ZI3t+bXAPGbat/6tz9aYx0zz3r/9MQgIjGOmOda3P1/jG8fM1xw32s9MTosawMzTHYPYnq/xI+OYaY/1bc/XyLlsA5hpjvXtX1MD85jdEINkO/JyQtYyQXX+vj1HzIpZBZumzMbMctgU58pt4RlSikh9knhOv9Msjg97EtBL+8BM9+T4mRvPqfClLbxjauXlp8Pw7tNRlA4l/SxGwK67ZgEv9Y3agmjT6QHlyoHoW0yAKRDvSIYuqb6rRyFZo0v9dVvCqk0NbCaYsBkhI6iCtHwZD2LKjvgTnnHJhtebrJTKD3zJFLtx8la9NSodyedI/UTqqFOE0hGzNngcVNu2FZrvv7AfSj7mORffS9lPHtdnF90b9D52ZvA/3O6GLWPz3c6z0e3CW90uuafbKVG0SMy+1u0iz3s37iqUPfiNHC+QTh0HzhXHC+JL9fU4XnDD2S2DHG/Sf6LLDvQ9b73mrLodTxznuep5wJnd0fMi8Z7CLN3/6HlhJNm3pglPfeErficHoe6P8Dt1Dd5kNROIf4M9jiZISwXpaNc5HXe3owmBuiTvELoPhNO7KuYhVFfoHUJ1EnwgHP42GoRw4oTXCeHvD4TT5xgc4xCq+dVlsVutKqZqz1nCzbYq67UGnBVa0TeD6UUXYJ7q8Vd3dZH1pDWS7wQK2eRCoPgKsKzY/zO9i4n6v/eDD/8C7Vjbbts4EP0aAbsPXViib3m0nLQF6rbeeJFe3miJlthSokCNYrtfv0OKkqwL3KZZo1vAeQg4h8MhNedwOLBDls
nhlaJZ/FaGTDjeKDw45NbxvNmU4H8NHEtg6rklECkeltAJsOHfmAVHFi14yPKWI0gpgGdtMJBpygJoYVQpuW+77aRo75rRiPWATUBFH/3AQ4gt6k5vmonXjEex3XruzcqJhFbO9kvymIZyfwKRO4cslZRQjpLDkgmduyov+Xq7uXvDPsPD/SbcLt4VD3+LF2Wwl09ZUn+CYin8t6G9MvQjFYXNl7P0nIXPU2BqRwOmTd93vKnAnf2twlGkRzVSAe8z4Amyr6oZPMm28TYJhGPFSgyJwJGLczsuxFIKqcwM2Zk/jcsUrJyQktI+8RuZP8Sp4FGKWMD0mevNHpkCduho4DsJdGtW8TYwmTBQR1xno5C5FYK9CTNr7htZ1aqPTxTlViC1Uo7q0A1dOLCMPUUYbi+xLET1W1MqiGUkUyruGtRXskhDpsPq7DU+KykzS8kXBnC0uacFSIROCGMHDh/18r8m1vpkg+nx7eHUOFojpHls9nStsaaAbKUG8UYmbBou9H3XB89YWiIvuc6ICZGDkl/rO+w9QSFIujqenFibn6qP0UZzZmNVhy6TqzP6EzJCVmShAnbGz1ZWoCpi5+J5w7JUTFDgj+3DDWnMLl1LvNWNnMezeUvO7txrhyjPZVd1lFof4+fFS3qlZ7U+KSIdXed7ngiasop2O+MOFQB99Tm+AAs7AVrYfhBzEa7oURY61znQ4Gtl+bFU/BuGpZXGcVpV2hqPWx4bvdIqRLEcfdaVINwO9JYeWo4rmoMFAikEzXK+rT8jwXTz1JcAMrFOzy2Nl6qELpm0pEPItFcKCRkohZXkzqj0HpsAmkaYlma7Ubvw3vQLrzsd2G3cqbtU6JJDgfm6BOaXEPWk/552lSy4UXG7nHUfwlQap0ragu1gQNgJD0NT0/OMBjyN/jEV/IXbICuz8JY0yL3NDzFPAVCgpQS1XgTdMrGWOQcudXxV+vqZrgEmiRPfmdwaRKHiUvwIyo2QGEp7z3J4lurIsOos7XgPf0hkZzT2rNd2OsCtb+q8eGR/5FCExz8dsjCVOS8E9JjHfEDNfIfpp5NfVrU2z+M+zxqSuHYnzNMao2TM49rlvs2xj3ld6ucS2faWaLuN/csEMPF+TADe9FL9Vv82X/ut36ffmv3ifqv9irk3nefpwv3WrFe+7lkigV17rt+i55qMrj3XcN7m157rGaqb/a97rptrz3Xpnuu8AC7Yc6HZ/HhaFoPmF2hy9y8=7V3bcts4Ev0avWyVXbxfHmMn3t2qSbwbz87sPKUoEpKYUIRCUbGdrx+AF0kEYBm6gEAMKFUxCREg2KfZp9FoQBP3dvn0zypZLT7CDBQTx8qeJu77iePYfmShP7jkuStxoqAtmVd51pXtCh7yn6Ar7CrON3kG1oMLawiLOl8NC1NYliCtB2VJVcHH4WUzWAzvukrmgCp4SJOCLv0zz+pFWxo54a78XyCfL/o720HcfrNM+ou7J1kvkgw+7hW5HybubQVh3R4tn25BgaXXy+VT8XF+9XPz6esf8D74728f/mfX367axu6OqbJ9hAqU9YWbtp228R9Jsekk1j1t/dyL8HGR1+BhlaT4/BHpycS9WdTLAp3Z6HCWF8UtLGDVXO3Omg8uh2Xd6QOSdnu+d53VfFB5UuTzEpWl6OkA+vKm6xGoavBEgPiKBOwtLEihAVyCunpG9bpWfKtrp9flXkcfd4rhBl3ZYl8pwq4w6ZRxvm17J3B00Mn8KPm7l5Z/loBolg7lb0csOa/rCn4De1WDNALTmWQEYgYAvjj5exeXvw+izDtJ/pEzdYNApPxj5eTvC7A/Tnqa/mfBNPBFyj+IlJN/cHH5Ryk4Uf7TyPd8S6D8w1A1+ffi1oJ/+5e9l74rn38dvfiXA4GR9V8r/g0s5eSvF//GyslfL/6NVJO/HWrEv5Y1kL5nyedfpJk68S8HAiPrf6wT//qxavLvg4ua8G+knPxtrfg3VE3+lsbjX8+Rz7+W1uNfFgIj67
/W418F5K/1+FcB+Ws9/lVA/jqNf/tgyzb638/1yyRgvQbAPBCM/AZoNQIObOUAsLUaAoeWegDoNQaOVQMgujgFKMzBHin9SDoHR5dnAJUpmAeBUfU/vjgBKM3ArnLyv7z9V5mAKRdUuvwdnfg3ojxQ2fK/vPjVpd9+wr0Xvk8HIEZmXzoAXcIMH82qt8zCHECM+hLQcWgk6iU6KmCSvWU6dhQDgg5Ir1MsI+tMEJTmZFsxEOiodH0JCFSmZUsxCOjA9Peakn+1gMvpBt365kgkfPyPgQTB0O2HjzKaj0jKICbvHcbkQcRAKBKFEB22SOGaxugVZJL1ql33N8ufQPY2oAo9xaASsMzjl/FxPU+6j8vIMtXRyWUhMW6omzZZenq58pGgY676ubnSUWBkn+rm58rHgI6+Gkd3AJHPWKU5qvfUO0vG06WJRTms6HjiHwntZKGW8tUavP4+vUXQyDRw+aCxsmCDou7EiL6Ztfig0u8bvOHKze/5EqzRF5/AI/r/M1wm5e5LdDTv/jaNTPsCOP3al6GOTveuS5YY+HK6XjXnFl3UVluv8I32VKm/J+7oVVf8Dl1RwmqZFHSflnm514W2uSN6cXrRP8S2/4t1f4zutrpXUdpIl/QFKSwxr5yooRcuknDTqzf3UIKfSOzjNPbtBZ1F2tna5r6YYFjEWbh8j0qHRFfCEtPvPp12RT1DFmCGW8D8l6dJ8a4rriHuG4u2K7gpM0zS7/EIgjcw1VzX9dsRRblkKCRkhNfD/ppBUKofn1yedDmWPgh1k1ioUG5SpwQSPFvPfxUxjwGYOLw4Yrga4xXGquHFsbRCY7xi/3WLOC5eHGm4GuNFhpAjxqBxXLw40nY1xosMNkvHq/duDF5c/CUfL460YI3xikLV8KKj1AavPX8jUM0ecmy9oDFetuWqBhjHXg3DEMT5c3VqYxQ6ivnwLkdMQy+ISJqSDxFHGEMviChLJx8jjtCFThkJtq3a4NdlDH7dO4H+wyxN45h4qxwudNzAjd3sQAB++8Matkg7SLxjfkAjyIq/u6IQ7FXDIHhiqsk2i0EagowBskHw0DtIuovSEWQMmQ2ChwbRkWoIMgbRBsGD05qEJ8P6EYZREWSMqg2ChwbdZFxEOoKMQbdB8BAPklMz0hFkjMkNgocQjFVDkDFiNwge4cnIR5CRfmAQPBg281WDkJWRYNLYlc4DN2nsbz2NfWyE32Rau0ljN2nsE7x55JBy4352UV4ae+8EmDQKdrzH919FbNSZK9+klR0cWVqq4cWIkRu8Xkwrk4+XSSs7aA/JjRctVzZgHGllGgNG5pgpAJhZOHcMgykAmFk5dxAw8ld55QNmls4d9DlC5UyiWTt3OFTtKYcYx+o5vdJwyXl16Z58wBHZ0AsikqnkQ8QRzNALIsrUyceII4ChVzK7ckPgkPUa8UxUzpJlXjy3M5XbsD+Wt4f+Ztn8y6qCT/kyKeGPhPq+qXPLUfPLJv9CX3RUdfu86s551d3zqnvnVffPqx4cUb2rOsHz1kchvgDFD4DnZ3BVgJTwpKrcddZgDtE9rE1+RKUyb2wUehmSEv8pjqhb5FNQJXUOy2197rpwBU6oNasAOLJKUuXJgYfCrV2tQZXPcFHzRZut0NhdbAJs79pbPTESKD44k/h2EnsvpiyIGqtY1t1dRE7j/UpZNgGRJxXHNFeMmmQTsujccIXhCsMVhisMV6i0xlI+V7DmQg1XGK4wXGG4wnCF1PUXxAoa+Vzx8h70hisMVxiuMFxhuEJSDMonE1R82WTBSgEzZGHIwpCFIQtDFlK3x6BSraSTBSv91JCFIQtDFoYsDFmoNGOhAFmwUt8NWRiyMGRhyMKQhdQpC2LbNgXIgrXsxpCFIQtDFoYsDFmoNL+tAFm8vDudIQtDFoYsDFkYspC1lSmxi5B8tog4VrYeuVay+RAoHdrRjkJPlPj9fhlvL33HimnpBwzx26Ew+dPLXEqY4aNZdSYQWQKiWToEol
m0yrE4NY3AdCYbipiBhC8MCDqHHAl7iY4KmGTnQuGDKPNOgiJypq7gnQOVg4JO0VynWErW+abJSU97I7JgGvhCt6XwlIOBTn6qLwFClIITQZhGvudbAkGIXOVAoJMKvtcUAjrtaxBYJEYhjVHEwCgShhE9l5fCNY2SgP2QVAcrDFUDC/WAQuvf5WpTf0xWK0D7XEdt6zIYRRDw3TUfxuhFul9sE6MS17EojByHBZIvDCSejcbK7F1VNeJOi2S9zlMCjEbb/+yewT1C8kiY1fP/MeTXfn/6V6cBzcn7p8HZc3fWdhFkc3ACPI5VJ9UcHLyu1V0ayD2g/APsVIEiqfMfw96xwOvu8B+YNz9n0TvtFjF6jYmXdA03VQq6WjsVoBryyB/ptImGWklQDSG4k+e9y1b4gvXLHXbIDfS6Du9Us21xp6hbmZ6ju3QA9qHeZLjjcPoVm3hSk8/ffL5qwaZ2n1/mWVa8FHoZmrah7eJ8U0bZjd4hJ2RZ9sli6H0gjERcjrDJGPZpYJ2uHf8VAwWe8nrPrqGzv7rO4ONdJXxyIaPW2gQeQn7d/NlSzZ9L0GRgWdfW3sc5zRjaLtGMO7hL6MTX8d4nGsVSet1Ppoq1lC4d/PpQgCVoukr+ksdjjjB2rE35rYSPTaUqT6YFWBtzep675zFi0Mx94QSaUzr4dt9QJXpbbzZ5kWHHnFSIdxn4vknS5+0FGnruxK5NQcCAclzP3eXZ5v5tee7y/PHh4Np3/UsQkhParGa2KuaNwkfUo43BRzw7/huvTprCH9bM0HeuvZc1k1f9ybcqjMZxwOi7jqLydHwZrup8+bPPb1hVEDlZS+yfw8adLxB8xuM6y+MKY84Am0CPi46C3n+++v3+/rcH40jhOR7pIVBXagj0l+QH0nRH5PtzhAvEVgfBVr/vsGCbz/GbEALdHOOtNCp12K3g9laoxSrCYuk2+0ZidbWPQ3D4J41rkhnf5DzfBOHKGUMQ55z0qTCG+k6fm7PIGXRu7guYhqlvdzv5Inqurn8Awfalf7H2/OBNbdIBhpB7fBZBnDPs8fzaqXFZzndZmFNaVDbc0V6KQ6iUQyZUi7IifYFgK0IHDj+D9abAPTNz/ifN+VMaE9GJY5ea9EenFcSLXHZKgaS1+AgzgK/4Gw==7Zxbb6M4FMc/TaTOQyOuCTw2SWd3pM6q2lQ7O/tSucEhVg2OjDNN5tOvAXM1obmHIHiYxgdjwjl//2wfPOnpY2/9BwXLxXfiQNzTFGfd0yc9TVNNS+F/QstGWDRrEFtcihxhywxT9BsKo7jQXSEHBoWKjBDM0LJonBHfhzNWsAFKyUex2pzg4l2XwIWSYToDWLb+QA5bCKs6sLMTf0LkLsStLW0Yn/BAUlk8SbAADvnImfTHnj6mhLD4k7ceQxx6L/HL++p9/g39Z25+MOfeYtN/NqvlfdzY130uSR+BQp+dtmkRy18Ar4S//uIS+OY7cN3TBpjfbTRlFPkuL7lhSbiCbRL/Bh/Iw8DnpdGc+Gwqzqi8DDByff55xr82pNzwC1KGeGgexAlGltw6WyDsPIENWYUPFzAwe09KowWh6DdvFmDRJj9NmVCZoRVqTMMruVnhVgoDXuc58ZhaMn0H60LFJxAwYZgRjMEyQG/pY3iAusgfEcaIJyqlUgjPzxHGY4IJjRyif42OxB3xNw3FEpVz9ZToSF0a+gauS5r9JOBqqkLefyHxIKMbfp1oZSB0K3quoRpx+SPrBtpQ1FnkeoApbED0PDdtOVMX/yAEtofYLElskpowipQUMEre0/5adrJPokqJvDCcswpxechxcNTYEsy4gl9CsU3u1czyFF040TPL38ILoYkSBhiIZRCGCYM3iJ9JgBgiYfs0rjtaEuSzyFXmqGdOIgvlgfb5QwAUxQ9yeX3AgB0V7EF1sJPoarsF1zpTbBPc50kCvI4krSCJ3jiUqGrHkmPCrVTHuykwMaToTumsY0kbWGJbjWOJ2bHkmHAbzW
aJvMSZ8EfuYNIGmKj8Fk2jybCjyTHxbvYyR5OCWwDJSyMZonQMqUuTlCYkalLOa8ys0JhqnIshegVCRuGNwmYe+Ic3QjAEPlcb95Aae2cUJVHxyvODuFIgRjUl7LLhvxNJldyVLAVRCTz7syjWaRE7hoyd0ET4tXMcaWPBCQb9ChQVkTPizh4rfTN6jDEvq1n5jDzS6nG0o1TOphR5odQNNqcK7q4cONdYU7UsCTvy66sLGWLQe329C/+I7v5S7OnKl9i8bXwqkQMFrwBzEdx9qScMFw0fpO6ceVzN4fGcU+BBcVkAKYKFizrinFaU1m6iNLVzzW/tiilQcTnFy02cB+nDbh5UNw8aHriUGqpnkprWJXmPCrddSxJ9eOWllLyWqhzf/LChkCjVo1uCnm6cOa08DHM3eWiDc+lDnv+8oPSNYnOGlS5FVzusWEZxWDHNimn1RTN0mpwR7oaVPeanZj03rp2hk5Mn05nfUeO2qGHYjaNGl2o5ihp6s6mxw1sb6DsP4YbULIYL5iV9GzouTPjBHbggLvEBfsysJVXoe3QyuEbs3zCOfcMSxZ9R0bZNUZ4kTIgKm1zhGVI+a4oYFtt87i/R2mCQGH7mz2aNRaVNvlRuLvZS+PQHdH/uXrKiM1hTT0wCOTn5oqCm3pbXRhRiwNCv4perUo+49JnESw2h7mFp8qLbJbzE319clWlQashWPmkofkCpoUjM6fMcoW9576UIUVnl0aCSA0dO458g602MMjFrRnxAcylZ+U5O0vPoKEo/lycUe8DF3XvpzuuDgDOsHWASAe4skOPwIifNopnDId5PyH3T7r+o93V5b2qD4J7x2NyLxtWDgtpXFHXvQaEpFN+2L/FIjFtaib7l93G7Yly1zT5vLD2SzUXpLMa4KNV1OUHacqrrWwRyFarrcgaz5VT/xP2X9X7VLokWUX1ol+b6w9vF+pZU87FYV0tY15W+Yqp2ehwGeVMxr0t1OdPQdqpvSQ5ch+rye4e2U73e/Zf1/g7J+ZumejmDc8NU37Lx/9ST9cGBk3VLvTLH5Zxi2zm+JQt3HY7LKa+2c7ze/Rf1fpLQbyvHSzkX1b5djJ8ndW4NTpVz0Urzcd2+LMcNeaHZco7HnbchHDfk5VDLOf6J+y/r/R3+c+xNc9zIg7xxEE+8/SnFt+3KOfYFaHkfhqL0ldKOv51BrlhFkJumITV2bpbLy8u2s3yLMK7DcnlJ1HaW17v/st7f4QeYbprlZq+QWxkaN8zzLTm541Pm/YGVe4FZALJhm4eh3S6TvZyzOTfW5f0Vbce61SCsm/IGi7Zjvd79J/I+L2Y/Lxn3lexXOvXH/wE= \ No newline at end of file 
+7Ztbc5s4FMc/jR+TQciAeUzcJJ3Odqa7mW4vb1qQbTUyokL40k+/wggMAseXyMSZ4BfDQT66/H86EpI8gOP56oGjePaZhZgObCtcDeCHgW0DYLnyK7Osc8vItXLDlJNQJdoaHskfrIxFspSEOKklFIxRQeK6MWBRhANRsyHO2bKebMJoPdcYTXHD8Bgg2rR+I6GYqVrY3tb+EZPprMgZuH7+ZI6KxKomyQyFbFkxwbsBHHPGRH41X40xzRqvaJf8d/c7npYF4zgSh/zgLvp3BtIleFrCERnF3785XxdXyssC0VRVWBVWrIsWwFF4kzWkvAsoShISDODtTMypNAB5mQjOnsrGkdW6bZZMZYPDWmOrcj5gNseCr2WC5baJHdVss0rrFjaOKRJkUZcIKaWnpbsyhy+MyJLYVkGlp/woJoeF38JFwlIeYPWrapPuc6T5EYhPsWj4kReVWm9NG8GOEM9+l+IN620O3RPF8/w9js6sntNQ7x88RySStjGKUUAEkYFPF1TglWiTcMwo49ISsUimvJ0QSjUTomQayVuKJ5mHBeaCyDh3o8xzEoZZJrfLGRH4URYgy3Epo7q0cZZGIc5qY2XOWSRUpAbOc8hkeeDVs9AUT3Ux1G2FKbeFKWjtxqem17HiuPu7VpDyxaZJMhEq/Uw1d0Whanu5eeJ7kpUnb82KVAMbTiYTOwgausonofuf67iNTjt8C51WC5S2b6jTlo73dFopDlpXksVZgmR3gUvaVD6Obz1brnIIKco1rKWXF3kJjEaQUQukLs06N6mh6v5Os0nGhsOrZAPijUwAnHi1Yad4Lq+m6psW6V/kCIX4d4qCdeFQ1jL3Wc9HmsnWZjzgBbJTYH5xIW+0g+RKj/RaeqR9rpBXzLgvFyeKgif5nE2y6l8EWjzX5bLJgv5rkzXcP5pWBlAm09VlCVEyK4fa7OYLErJLRxuLBKZ1IquPutl9ZTQdy49l1UZj8BZGUl3cU6e/DUe6/Gee/oLm/LeH4kQo5Pzj2jGDhSxTzdGwaywOmHn3WByIhWsoVtguqEPR8asy8HoojEGxb9njYCj8V4ai7fWnh+IkKCA0BAWEGhRdDx9+D4UxKDxTUOiOul4nb3uz7aE4CYohMASF7qh8a+0KiuO2vnoonoPCMQWF5qjzHTW7h8IUFI5lCArdUXk2oCsoYA+FMShM7d7qjoC+o3RmKGATChLFqXg3O7aermTL/kWnW7awucr8WVYbX40RpyzTJMAR4mRzieYxJdH00sWyy3tVxJZTP0crpw+yblO48n2wG+X6pWBzAVZb8wOWf+05fuVzWrx1wXF+zx1+25aJj9mshDv2GIsgkbzNHURoJrgPCzlLtZsxog3is20hwrYVYBN6T1koH0/SKBCERe9bdG2ZBthN0T2/S9GPW+F9jYNXdVHe3OBgyyDuGhoQWnwdfRjrBcPEQ3z/6zECn37CTx9//bB+/v2XJa76Nzdz7Jg6eKA7Audb+Gtloj+MYowJ1zbEhO7o0GOdppjoX0DMMWHq0IHuqGsm+oMoxpjwTC0F6466ZqI/h2KOCehfF+d6XoxF05c5MuTt9k96efLtXx3h3f8=7Ztfj+I2EMA/DY89JXH+Ph7Z46r2Wq1E1d49nazEkFxDjByzQD99HWKTxM4B264PW4IHFE9sJ57fDB57zAykm8NHArfFbzhH1cxz8sMMPM08LwIR+24Fx04QOk4nWJMy70RuL1iW/yAuFNV2ZY6aUUWKcUXL7ViY4bpGGR3JICF4P662wtX4qVu4RopgmcFKlf5V5rTgUjdM+hs/o3Jd8EfHHh/wBorKfCRNAXO8H4jAhxlICca0u9ocUlS1uhN6+fLnH8m39S/xDoW/fi2a56/k+OmnrrPFa5qch0BQTd+2a6/r+gVWO64vPlZ6FAokeFfnqO3EmYH5vigpWm5h1t7dM4thsoJuKlZy2eWqrKoUV5ic2gKHfZKkleOacttwPV4e1EvZx2m7h1W5rpm
MdEDmDSX4byRq1rhmPczzkjBLKXFbscG7FtOcjwMRig4S+Ctac88omQsgvEGUHFk73osv6HPzBzEv7wfGxEXFwIxc4SaQ2+/63HXPiF1wTK9ABnQjAyAMrUbmm4bM140sDNPUamShacgCvciSJE0XC6uRxaYhC3Uja2czm5EFQWAYskgvssUiDO2ey4LINGTiaTonMwBsZhYGpv0yuq5uZotFbPVPY5gYx0z70oyzsJdZZBwzzWuz+WlGs5lZ5JrGzFOZfcIwV7gxRdAxnEllDolxkYCQMcUhJp+3ai0zWL3nNzZlnrePmbSGsb0MwccqeMfhMaoufr4j8QNT/CYAetr4qYvrFMPqwW86ngTX+cU/FJ+60G5wBcmD3/ScJ+9t3d//Yt1xiu2bW6Hkc0F49zkv0cvM/q0SeQ13f2bCiB6x5a3rAQOYac/V2J5eAyAwjZn2ZI3t+bXAPGbat/6tz9aYx0zz3r/9MQgIjGOmOda3P1/jG8fM1xw32s9MTosawMzTHYPYnq/xI+OYaY/1bc/XyLlsA5hpjvXtX1MD85jdEINkO/JyQtYyQXX+vj1HzIpZBZumzMbMctgU58pt4RlSikh9knhOv9Msjg97EtBL+8BM9+T4mRvPqfClLbxjauXlp8Pw7tNRlA4l/SxGwK67ZgEv9Y3agmjT6QHlyoHoW0yAKRDvSIYuqb6rRyFZo0v9dVvCqk0NbCaYsBkhI6iCtHwZD2LKjvgTnnHJhtebrJTKD3zJFLtx8la9NSodyedI/UTqqFOE0hGzNngcVNu2FZrvv7AfSj7mORffS9lPHtdnF90b9D52ZvA/3O6GLWPz3c6z0e3CW90uuafbKVG0SMy+1u0iz3s37iqUPfiNHC+QTh0HzhXHC+JL9fU4XnDD2S2DHG/Sf6LLDvQ9b73mrLodTxznuep5wJnd0fMi8Z7CLN3/6HlhJNm3pglPfeErficHoe6P8Dt1Dd5kNROIf4M9jiZISwXpaNc5HXe3owmBuiTvELoPhNO7KuYhVFfoHUJ1EnwgHP42GoRw4oTXCeHvD4TT5xgc4xCq+dVlsVutKqZqz1nCzbYq67UGnBVa0TeD6UUXYJ7q8Vd3dZH1pDWS7wQK2eRCoPgKsKzY/zO9i4n6v/eDD/8C7Vjbbts4EP0aAbsPXViib3m0nLQF6rbeeJFe3miJlthSokCNYrtfv0OKkqwL3KZZo1vAeQg4h8MhNedwOLBDlsnhlaJZ/FaGTDjeKDw45NbxvNmU4H8NHEtg6rklECkeltAJsOHfmAVHFi14yPKWI0gpgGdtMJBpygJoYVQpuW+77aRo75rRiPWATUBFH/3AQ4gt6k5vmonXjEex3XruzcqJhFbO9kvymIZyfwKRO4cslZRQjpLDkgmduyov+Xq7uXvDPsPD/SbcLt4VD3+LF2Wwl09ZUn+CYin8t6G9MvQjFYXNl7P0nIXPU2BqRwOmTd93vKnAnf2twlGkRzVSAe8z4Amyr6oZPMm28TYJhGPFSgyJwJGLczsuxFIKqcwM2Zk/jcsUrJyQktI+8RuZP8Sp4FGKWMD0mevNHpkCduho4DsJdGtW8TYwmTBQR1xno5C5FYK9CTNr7htZ1aqPTxTlViC1Uo7q0A1dOLCMPUUYbi+xLET1W1MqiGUkUyruGtRXskhDpsPq7DU+KykzS8kXBnC0uacFSIROCGMHDh/18r8m1vpkg+nx7eHUOFojpHls9nStsaaAbKUG8UYmbBou9H3XB89YWiIvuc6ICZGDkl/rO+w9QSFIujqenFibn6qP0UZzZmNVhy6TqzP6EzJCVmShAnbGz1ZWoCpi5+J5w7JUTFDgj+3DDWnMLl1LvNWNnMezeUvO7txrhyjPZVd1lFof4+fFS3qlZ7U+KSIdXed7ngiasop2O+MOFQB99Tm+AAs7AVrYfhBzEa7oURY61znQ4Gtl+bFU/BuGpZXGcVpV2hqPWx4bvdIqRLEcfdaVINwO9JYeWo4rmoM
FAikEzXK+rT8jwXTz1JcAMrFOzy2Nl6qELpm0pEPItFcKCRkohZXkzqj0HpsAmkaYlma7Ubvw3vQLrzsd2G3cqbtU6JJDgfm6BOaXEPWk/552lSy4UXG7nHUfwlQap0ragu1gQNgJD0NT0/OMBjyN/jEV/IXbICuz8JY0yL3NDzFPAVCgpQS1XgTdMrGWOQcudXxV+vqZrgEmiRPfmdwaRKHiUvwIyo2QGEp7z3J4lurIsOos7XgPf0hkZzT2rNd2OsCtb+q8eGR/5FCExz8dsjCVOS8E9JjHfEDNfIfpp5NfVrU2z+M+zxqSuHYnzNMao2TM49rlvs2xj3ld6ucS2faWaLuN/csEMPF+TADe9FL9Vv82X/ut36ffmv3ifqv9irk3nefpwv3WrFe+7lkigV17rt+i55qMrj3XcN7m157rGaqb/a97rptrz3Xpnuu8AC7Yc6HZ/HhaFoPmF2hy9y8=7V3bcts4Ev0avWyVXbxfHmMn3t2qSbwbz87sPKUoEpKYUIRCUbGdrx+AF0kEYBm6gEAMKFUxCREg2KfZp9FoQBP3dvn0zypZLT7CDBQTx8qeJu77iePYfmShP7jkuStxoqAtmVd51pXtCh7yn6Ar7CrON3kG1oMLawiLOl8NC1NYliCtB2VJVcHH4WUzWAzvukrmgCp4SJOCLv0zz+pFWxo54a78XyCfL/o720HcfrNM+ou7J1kvkgw+7hW5HybubQVh3R4tn25BgaXXy+VT8XF+9XPz6esf8D74728f/mfX367axu6OqbJ9hAqU9YWbtp228R9Jsekk1j1t/dyL8HGR1+BhlaT4/BHpycS9WdTLAp3Z6HCWF8UtLGDVXO3Omg8uh2Xd6QOSdnu+d53VfFB5UuTzEpWl6OkA+vKm6xGoavBEgPiKBOwtLEihAVyCunpG9bpWfKtrp9flXkcfd4rhBl3ZYl8pwq4w6ZRxvm17J3B00Mn8KPm7l5Z/loBolg7lb0csOa/rCn4De1WDNALTmWQEYgYAvjj5exeXvw+izDtJ/pEzdYNApPxj5eTvC7A/Tnqa/mfBNPBFyj+IlJN/cHH5Ryk4Uf7TyPd8S6D8w1A1+ffi1oJ/+5e9l74rn38dvfiXA4GR9V8r/g0s5eSvF//GyslfL/6NVJO/HWrEv5Y1kL5nyedfpJk68S8HAiPrf6wT//qxavLvg4ua8G+knPxtrfg3VE3+lsbjX8+Rz7+W1uNfFgIj67/W418F5K/1+FcB+Ws9/lVA/jqNf/tgyzb638/1yyRgvQbAPBCM/AZoNQIObOUAsLUaAoeWegDoNQaOVQMgujgFKMzBHin9SDoHR5dnAJUpmAeBUfU/vjgBKM3ArnLyv7z9V5mAKRdUuvwdnfg3ojxQ2fK/vPjVpd9+wr0Xvk8HIEZmXzoAXcIMH82qt8zCHECM+hLQcWgk6iU6KmCSvWU6dhQDgg5Ir1MsI+tMEJTmZFsxEOiodH0JCFSmZUsxCOjA9Peakn+1gMvpBt365kgkfPyPgQTB0O2HjzKaj0jKICbvHcbkQcRAKBKFEB22SOGaxugVZJL1ql33N8ufQPY2oAo9xaASsMzjl/FxPU+6j8vIMtXRyWUhMW6omzZZenq58pGgY676ubnSUWBkn+rm58rHgI6+Gkd3AJHPWKU5qvfUO0vG06WJRTms6HjiHwntZKGW8tUavP4+vUXQyDRw+aCxsmCDou7EiL6Ztfig0u8bvOHKze/5EqzRF5/AI/r/M1wm5e5LdDTv/jaNTPsCOP3al6GOTveuS5YY+HK6XjXnFl3UVluv8I32VKm/J+7oVVf8Dl1RwmqZFHSflnm514W2uSN6cXrRP8S2/4t1f4zutrpXUdpIl/QFKSwxr5yooRcuknDTqzf3UIKfSOzjNPbtBZ1F2tna5r6YYFjEWbh8j0qHRFfCEtPvPp12RT1DFmCGW8D8l6dJ8a4rriHuG4u2K7gpM0zS7/EIgjcw1VzX9dsRRblkKCRkhNf
D/ppBUKofn1yedDmWPgh1k1ioUG5SpwQSPFvPfxUxjwGYOLw4Yrga4xXGquHFsbRCY7xi/3WLOC5eHGm4GuNFhpAjxqBxXLw40nY1xosMNkvHq/duDF5c/CUfL460YI3xikLV8KKj1AavPX8jUM0ecmy9oDFetuWqBhjHXg3DEMT5c3VqYxQ6ivnwLkdMQy+ISJqSDxFHGEMviChLJx8jjtCFThkJtq3a4NdlDH7dO4H+wyxN45h4qxwudNzAjd3sQAB++8Matkg7SLxjfkAjyIq/u6IQ7FXDIHhiqsk2i0EagowBskHw0DtIuovSEWQMmQ2ChwbRkWoIMgbRBsGD05qEJ8P6EYZREWSMqg2ChwbdZFxEOoKMQbdB8BAPklMz0hFkjMkNgocQjFVDkDFiNwge4cnIR5CRfmAQPBg281WDkJWRYNLYlc4DN2nsbz2NfWyE32Rau0ljN2nsE7x55JBy4352UV4ae+8EmDQKdrzH919FbNSZK9+klR0cWVqq4cWIkRu8Xkwrk4+XSSs7aA/JjRctVzZgHGllGgNG5pgpAJhZOHcMgykAmFk5dxAw8ld55QNmls4d9DlC5UyiWTt3OFTtKYcYx+o5vdJwyXl16Z58wBHZ0AsikqnkQ8QRzNALIsrUyceII4ChVzK7ckPgkPUa8UxUzpJlXjy3M5XbsD+Wt4f+Ztn8y6qCT/kyKeGPhPq+qXPLUfPLJv9CX3RUdfu86s551d3zqnvnVffPqx4cUb2rOsHz1kchvgDFD4DnZ3BVgJTwpKrcddZgDtE9rE1+RKUyb2wUehmSEv8pjqhb5FNQJXUOy2197rpwBU6oNasAOLJKUuXJgYfCrV2tQZXPcFHzRZut0NhdbAJs79pbPTESKD44k/h2EnsvpiyIGqtY1t1dRE7j/UpZNgGRJxXHNFeMmmQTsujccIXhCsMVhisMV6i0xlI+V7DmQg1XGK4wXGG4wnCF1PUXxAoa+Vzx8h70hisMVxiuMFxhuEJSDMonE1R82WTBSgEzZGHIwpCFIQtDFlK3x6BSraSTBSv91JCFIQtDFoYsDFmoNGOhAFmwUt8NWRiyMGRhyMKQhdQpC2LbNgXIgrXsxpCFIQtDFoYsDFmoNL+tAFm8vDudIQtDFoYsDFkYspC1lSmxi5B8tog4VrYeuVay+RAoHdrRjkJPlPj9fhlvL33HimnpBwzx26Ew+dPLXEqY4aNZdSYQWQKiWToEolm0yrE4NY3AdCYbipiBhC8MCDqHHAl7iY4KmGTnQuGDKPNOgiJypq7gnQOVg4JO0VynWErW+abJSU97I7JgGvhCt6XwlIOBTn6qLwFClIITQZhGvudbAkGIXOVAoJMKvtcUAjrtaxBYJEYhjVHEwCgShhE9l5fCNY2SgP2QVAcrDFUDC/WAQuvf5WpTf0xWK0D7XEdt6zIYRRDw3TUfxuhFul9sE6MS17EojByHBZIvDCSejcbK7F1VNeJOi2S9zlMCjEbb/+yewT1C8kiY1fP/MeTXfn/6V6cBzcn7p8HZc3fWdhFkc3ACPI5VJ9UcHLyu1V0ayD2g/APsVIEiqfMfw96xwOvu8B+YNz9n0TvtFjF6jYmXdA03VQq6WjsVoBryyB/ptImGWklQDSG4k+e9y1b4gvXLHXbIDfS6Du9Us21xp6hbmZ6ju3QA9qHeZLjjcPoVm3hSk8/ffL5qwaZ2n1/mWVa8FHoZmrah7eJ8U0bZjd4hJ2RZ9sli6H0gjERcjrDJGPZpYJ2uHf8VAwWe8nrPrqGzv7rO4ONdJXxyIaPW2gQeQn7d/NlSzZ9L0GRgWdfW3sc5zRjaLtGMO7hL6MTX8d4nGsVSet1Ppoq1lC4d/PpQgCVoukr+ksdjjjB2rE35rYSPTaUqT6YFWBtzep675zFi0Mx94QSaUzr4dt9QJXpbbzZ5kWHHnFSIdxn4vknS5+0FGnruxK5NQcCAclzP3eXZ5v5tee7y/PHh4Np3/UsQkhParGa2Kua
NwkfUo43BRzw7/huvTprCH9bM0HeuvZc1k1f9ybcqjMZxwOi7jqLydHwZrup8+bPPb1hVEDlZS+yfw8adLxB8xuM6y+MKY84Am0CPi46C3n+++v3+/rcH40jhOR7pIVBXagj0l+QH0nRH5PtzhAvEVgfBVr/vsGCbz/GbEALdHOOtNCp12K3g9laoxSrCYuk2+0ZidbWPQ3D4J41rkhnf5DzfBOHKGUMQ55z0qTCG+k6fm7PIGXRu7guYhqlvdzv5Inqurn8Awfalf7H2/OBNbdIBhpB7fBZBnDPs8fzaqXFZzndZmFNaVDbc0V6KQ6iUQyZUi7IifYFgK0IHDj+D9abAPTNz/ifN+VMaE9GJY5ea9EenFcSLXHZKgaS1+AgzgK/4Gw==7Zxbb6M4FMc/TaTOQyOuCTw2SWd3pM6q2lQ7O/tSucEhVg2OjDNN5tOvAXM1obmHIHiYxgdjwjl//2wfPOnpY2/9BwXLxXfiQNzTFGfd0yc9TVNNS+F/QstGWDRrEFtcihxhywxT9BsKo7jQXSEHBoWKjBDM0LJonBHfhzNWsAFKyUex2pzg4l2XwIWSYToDWLb+QA5bCKs6sLMTf0LkLsStLW0Yn/BAUlk8SbAADvnImfTHnj6mhLD4k7ceQxx6L/HL++p9/g39Z25+MOfeYtN/NqvlfdzY130uSR+BQp+dtmkRy18Ar4S//uIS+OY7cN3TBpjfbTRlFPkuL7lhSbiCbRL/Bh/Iw8DnpdGc+Gwqzqi8DDByff55xr82pNzwC1KGeGgexAlGltw6WyDsPIENWYUPFzAwe09KowWh6DdvFmDRJj9NmVCZoRVqTMMruVnhVgoDXuc58ZhaMn0H60LFJxAwYZgRjMEyQG/pY3iAusgfEcaIJyqlUgjPzxHGY4IJjRyif42OxB3xNw3FEpVz9ZToSF0a+gauS5r9JOBqqkLefyHxIKMbfp1oZSB0K3quoRpx+SPrBtpQ1FnkeoApbED0PDdtOVMX/yAEtofYLElskpowipQUMEre0/5adrJPokqJvDCcswpxechxcNTYEsy4gl9CsU3u1czyFF040TPL38ILoYkSBhiIZRCGCYM3iJ9JgBgiYfs0rjtaEuSzyFXmqGdOIgvlgfb5QwAUxQ9yeX3AgB0V7EF1sJPoarsF1zpTbBPc50kCvI4krSCJ3jiUqGrHkmPCrVTHuykwMaToTumsY0kbWGJbjWOJ2bHkmHAbzWaJvMSZ8EfuYNIGmKj8Fk2jybCjyTHxbvYyR5OCWwDJSyMZonQMqUuTlCYkalLOa8ys0JhqnIshegVCRuGNwmYe+Ic3QjAEPlcb95Aae2cUJVHxyvODuFIgRjUl7LLhvxNJldyVLAVRCTz7syjWaRE7hoyd0ET4tXMcaWPBCQb9ChQVkTPizh4rfTN6jDEvq1n5jDzS6nG0o1TOphR5odQNNqcK7q4cONdYU7UsCTvy66sLGWLQe329C/+I7v5S7OnKl9i8bXwqkQMFrwBzEdx9qScMFw0fpO6ceVzN4fGcU+BBcVkAKYKFizrinFaU1m6iNLVzzW/tiilQcTnFy02cB+nDbh5UNw8aHriUGqpnkprWJXmPCrddSxJ9eOWllLyWqhzf/LChkCjVo1uCnm6cOa08DHM3eWiDc+lDnv+8oPSNYnOGlS5FVzusWEZxWDHNimn1RTN0mpwR7oaVPeanZj03rp2hk5Mn05nfUeO2qGHYjaNGl2o5ihp6s6mxw1sb6DsP4YbULIYL5iV9GzouTPjBHbggLvEBfsysJVXoe3QyuEbs3zCOfcMSxZ9R0bZNUZ4kTIgKm1zhGVI+a4oYFtt87i/R2mCQGH7mz2aNRaVNvlRuLvZS+PQHdH/uXrKiM1hTT0wCOTn5oqCm3pbXRhRiwNCv4perUo+49JnESw2h7mFp8qLbJbzE319clWlQashWPmkofkCpoUjM6fMcoW9576UIUVnl0aCSA0dO458g602MMjFrRnxAcyl
Z+U5O0vPoKEo/lycUe8DF3XvpzuuDgDOsHWASAe4skOPwIifNopnDId5PyH3T7r+o93V5b2qD4J7x2NyLxtWDgtpXFHXvQaEpFN+2L/FIjFtaib7l93G7Yly1zT5vLD2SzUXpLMa4KNV1OUHacqrrWwRyFarrcgaz5VT/xP2X9X7VLokWUX1ol+b6w9vF+pZU87FYV0tY15W+Yqp2ehwGeVMxr0t1OdPQdqpvSQ5ch+rye4e2U73e/Zf1/g7J+ZumejmDc8NU37Lx/9ST9cGBk3VLvTLH5Zxi2zm+JQt3HY7LKa+2c7ze/Rf1fpLQbyvHSzkX1b5djJ8ndW4NTpVz0Urzcd2+LMcNeaHZco7HnbchHDfk5VDLOf6J+y/r/R3+c+xNc9zIg7xxEE+8/SnFt+3KOfYFaHkfhqL0ldKOv51BrlhFkJumITV2bpbLy8u2s3yLMK7DcnlJ1HaW17v/st7f4QeYbprlZq+QWxkaN8zzLTm541Pm/YGVe4FZALJhm4eh3S6TvZyzOTfW5f0Vbce61SCsm/IGi7Zjvd79J/I+L2Y/Lxn3lexXOvXH/wE=7VrbcpswEP0aP7YDCAx+jJ3bdNpOZpJpm6eOAjJogpFHlmO7X19hbkarxpcY7IY8BS2wQuccrQ8bemg0Wd5wPI2+sYDEPcsIlj102bMs0zT68k8aWWURr29kgZDTIL+oCtzTPyQPFpfNaUBmtQsFY7Gg03rQZ0lCfFGLYc7Zon7ZmMX1Wac4JCBw7+MYRn/SQETFuvqD6sQtoWGUT+1ZbnZigouL85XMIhywxUYIXfXQiDMmsqPJckTiFLwCl4dFmOBbcTPFxo/fX93LL49P409Zsut9bimXwEkijpsaZalfcDzP8crXKlYFgJzNk4CkSYweGi4iKsj9FPvp2YWUjIxFYhLLkSkPAzyL1temg5ng7LlEXeI1HNM4HrGYcTlOWCJzDPMHIFyQpcLYluWaJQdSvIRNiOAreV+exSp0muu2pHFRqcCy81i0IQCzUC7OlReWuSt05UEO8B5gWwDs73K79ax+LKcePnF5FKZHF4ADmVzuF7IdfzybZptoTJcpD43BaxsKvDaE19Og6zUFrg3BJWLB+LMMQkAlGqKOXCZWRZwaveKYhokc+hI9IuPDFFsq681FfmJCgyD+F1X1zVTbLY1tBFdhCu3GlNUUU872mkOS4CKt/RXsNaiUgnOHhaQiWUcsA2krTzYFCcCPxWHYbmDnaLArYpzEWNCX+pw6QPMZ7hiVT1NSh2yFOnfw2aknmbE590l+32b1V1KB/QpTCcxDIkCqNcfl0g+nvb8X7X6MZzPqP0Q0qRNOllT82jh+TPeSXEo2ulzmW2s9WBWDJLimcZHhTVKQz7tGfFuZP5VkHHW3ewcKpq8KRq3cDcvFBXKR3F2gLldyBymUOLCS6xxNY5Xc2mFPvxP7iJwBwLpt+wi3hN4/DmFhPXv/iFyIb6v+0fIguqWBhIh2qOyoBhL1d6OqubIz2MtKfDjIijvb9I7lIDWpGrYExc9fByyke0rRqB7StuzDJKN6SNsctCsYEwjGlibSBKrpUDVXTSTyYDVv1UQi2BYD/LwTE+mY0LC3bCIR7PjqTeQIltazN5HOjq2txkwkeq0LCRHtUNlRTaSjace3aiLRRxvyUBPp9I/WhtSkatoTdKYPmZX6czGRjnukRmSZuC3B6DuRttPlcq66SEfz7792XSRs3wB+/lMX6Snl9wxakQh2YPQu8hesrWfnIj21v3XqVmTBpdZFQkQ7VHZMQ20bn7oXacMWw2t2oss2cuDUuXtDL9I0zK25GrYF9g5tC0i8xkAWLnE/A2msdYG5UIxqEW7TZw5OqSrTVGV1aLfSVdueLXcrbfhGOmLJuojyt3mZk9oX8CGB5s3fdDUCKd8V9yjaclh9kJkRU33Wiq7+Ag== \ No newline at end of file diff --git 
a/docs/source/mathematics/linear-model.rst b/docs/source/mathematics/linear-model.rst index 313fb6d..a11f38f 100644 --- a/docs/source/mathematics/linear-model.rst +++ b/docs/source/mathematics/linear-model.rst @@ -74,7 +74,7 @@ First constraint is from Kirschhoff law and describes balance between production .. math:: \begin{array}{rcl} - \Pi_{kirschhoff} &:& \forall n &,& \sum^{C^n}_{c}{\underline{\overline{\Gamma_c}}} + \sum^{L^n_{\downarrow}}_{l}{ \Gamma_l } = \sum^{P^n}_{p}{ \Gamma_p } + \sum^{L^n_{\uparrow}}_{l}{ \Gamma_l } + \Pi_{kirschhoff} &:& \forall n &,& \underbrace{\sum^{C^n}_{c}{\underline{\overline{\Gamma_c}}} + \sum^{L^n_{\downarrow}}_{l}{ \Gamma_l }}_{Consuming\ Flow} = \underbrace{\sum^{P^n}_{p}{ \Gamma_p } + \sum^{L^n_{\uparrow}}_{l}{ \Gamma_l }}_{Producing\ Flow} \end{array} Then productions and edges need to be bounded @@ -98,7 +98,7 @@ Variables Sometime, there are a lack of adequacy because there are not enough production, called *lost of load*. - Like :math:`\Gamma_x` means quantity present in network, :math:`\Lambda_x` represents a lack in network (consumption or production) to reach adequacy. Like for :math:`\Gamma_x`, lower case grec letter :math:`\lambda_x` is for cost associated to this lack. + Like :math:`\Gamma_x` means quantity present in network, :math:`\Lambda_x` represents a lack in network (consumption or production) to reach adequacy. Like for :math:`\Gamma_x` , lower case grec letter :math:`\lambda_x` is for cost associated to this lack. * :math:`\Lambda_c \in \mathbb{R}^T_+` lost of load for :math:`c` consumption @@ -111,7 +111,7 @@ Objective has a new term .. math:: \begin{array}{rcl} - objective & = & \min{\Omega_{transmission} + \Omega_{production}} + \underbrace{\Omega_{lol}}\\ + objective & = & \min{\Omega_{...} + \Omega_{lol}}\\ \Omega_{lol} & = & \sum^N_n \sum^{C^n}_{c}{\Lambda_c * {\lambda_c}} \end{array} @@ -122,7 +122,7 @@ Kirschhoff law needs an update too. Lost of Load is represented like a *fantom* .. 
math:: \begin{array}{rcl} - \Pi_{kirschhoff} &:& \forall n &,& \sum^{C^n}_{c}{\underline{\overline{\Gamma_c}}} + \sum^{L^n_{\downarrow}}_{l}{ \Gamma_l } = \sum^{P^n}_{p}{ \Gamma_p } + \sum^{L^n_{\uparrow}}_{l}{ \Gamma_l } + \underbrace{\sum^{C^n}_{c}{ \Lambda_c }} + \Pi_{kirschhoff} &:& \forall n \in N &,& [Consuming\ Flow] = [Producing\ Flow] + \sum^{C^n}_{c}{ \Lambda_c } \end{array} Lost of load must be bounded @@ -135,3 +135,154 @@ Lost of load must be bounded \forall c \in C^n \end{array} \right. &,& 0 \le \Lambda_c \le \overline{\underline{\Gamma_c}} \end{array} + + +Storage +------- + +Variables +********* + +Storage is a element inside Hadar to store quantity on a node. We have: + +* :math:`S^n` : set of storage attached to node :math:`n` + +* :math:`s \in S^n` a storage element inside a set of storage attached to node :math:`n` + +* :math:`\Gamma_s` current capacity inside storage :math:`s` + +* :math:`\overline{ \Gamma_s }` max capacity for storage :math:`s` + +* :math:`\Gamma_s^0` initial capacity inside storage :math:`s` + +* :math:`\gamma_s` linear cost of capacity storage :math:`s` for one time step + +* :math:`\Gamma_s^\downarrow` input flow to storage :math:`s` + +* :math:`\overline{ \Gamma_s^\downarrow }` max input flow to storage :math:`s` + +* :math:`\Gamma_s^\uparrow` output flow to storage :math:`s` + +* :math:`\overline{ \Gamma_s^\uparrow }` max output flow to storage :math:`s` + +* :math:`\eta_s` storage efficiency for :math:`s` + + +Objective +********* + +.. math:: + \begin{array}{rcl} + objective & = & \min{\Omega_{...} + \Omega_{storage}} \\ + \Omega_{storage} & = & \sum^N_n \sum^{S^n}_{s}{\Gamma_s * {\gamma_s}} + \end{array} + + +Constraints +*********** + +Kirschhoff law needs an update too. **Warning with naming** : Input flow for storage is a output flow for node, so goes into consuming flow. And as you assume output flow for storage is a input flow for node, and goes into production flow. + +.. 
math:: + \begin{array}{rcl} + \Pi_{kirschhoff} &:& \forall n \in N &,& [Consuming\ Flow] + \sum^{S^n}_{s}{\Gamma_s^\downarrow} = [Producing\ Flow] + \sum^{S^n}_{s}{\Gamma_s^\uparrow} + \end{array} + +And all these things are bounded : + +.. math:: + \begin{array}{rcl} + \Pi_{Store\ bound} &:& \left\{\begin{array}{cl} \forall n \in N \\ \forall s \in S^n \end{array} \right. &,& + \begin{array}{rcl} + 0 &\le& \Gamma_s &\le& \overline{\Gamma_s} \\ + 0 &\le& \Gamma_s^\downarrow &\le& \overline{\Gamma_s^\downarrow} \\ + 0 &\le& \Gamma_s^\uparrow &\le& \overline{\Gamma_s^\uparrow} + \end{array} + \end{array} + + +Storage has also a new constraint. This constraint applies over time to ensure capacity integrity. + +.. math:: + \begin{array}{rcl} + \Pi_{storage} &:& \left\{\begin{array}{cl} \forall n \in N \\ \forall s \in S^n \\ \forall t \in T \end{array} \right. &,& \Gamma_s[t] = \left| \begin{array}{ll}\Gamma_s[t-1]\\ \Gamma_s^0\ ,\ t=0 \end{array} + \right.\Gamma_s^\downarrow[t] * \eta_s - \Gamma_s^\uparrow[t] + \end{array} + + +Multi-Energies +-------------- + +Hadar handle multi-energies. In the code, one energy lives inside one network. Multi-energies means multi-networks. Mathematically, there are all the same. That why we don't talk about multi graph, there are always one graph :math:`G`, nodes remains the same, with same equation for every kind of energies. + +The only difference is how we link node together. If nodes belongs to same network, we use *link (or edge)* seen before. When nodes belongs to different energies we need to use *converter*. All things above remains true, we just add now a new element :math:`V` converters ont this graph :math:`G(N, L, V)` . + +Converter can take energy form many nodes in different network. Each converter input has a ratio between output quantity and input quantity. Converter has only one output to only on node. + +.. 
image:: /_static/mathematics/linear/converter.png + :scale: 80% + + +Variables +********* + +* :math:`V` set of converters + +* :math:`v \in V` a converter in the set of converters + +* :math:`V^n_\uparrow \subset V` set of converters **to** node :math:`n` + +* :math:`V^n_\downarrow \subset V` set of converters **from** node :math:`n` + +* :math:`\Gamma_v^\uparrow` flow **from** converter :math:`v`. + +* :math:`\overline{\Gamma_v^\uparrow}` max flow from converter :math:`v` + +* :math:`\gamma_v` linear cost when :math:`\Gamma_v^\uparrow` is used + +* :math:`\Gamma_v^\downarrow` flow(s) **to** converter. They can have many flows for :math:`v \in V`, but only one for :math:`v \in V^n_\downarrow` + +* :math:`\overline{\Gamma_v^\downarrow}` max flow to converter + +* :math:`\alpha^n_v` ratio conversion for converter :math:`v` from node :math:`n` + + +Objective +********* + +.. math:: + \begin{array}{rcl} + objective & = & \min{\Omega_{...} + \Omega_{converter}} \\ + \Omega_{converter} & = & \sum^V_v {\Gamma_v^\uparrow * \gamma_v} + \end{array} + + +Constraints +*********** + +Of course Kirschhoff need a little update. Like for storage **Warning with naming !** Converter input is a consuming flow for node, converter output is a production flow for node. + +.. math:: + \begin{array}{rcl} + \Pi_{kirschhoff} &:& \forall n \in N &,& [Consuming\ Flow] + \sum^{V^n_\downarrow}_{v}{\Gamma_v^\downarrow} = [Producing\ Flow] + \sum^{V^n_\uparrow}_{v}{\Gamma_v^\uparrow} + \end{array} + +And all these things are bounded : + +.. math:: + \begin{array}{rcl} + \Pi_{Conv\ bound} &:& \left\{\begin{array}{cl} \forall n \in N \\ \forall v \in V^n \end{array} \right. &,& + \begin{array}{rcl} + 0 &\le& \Gamma_v^\downarrow &\le& \overline{\Gamma_v^\downarrow} \\ + 0 &\le& \Gamma_v^\uparrow &\le& \overline{\Gamma_v^\uparrow} + \end{array} + \end{array} + +Now, we need to fix ratios conversion by a new constraints + +.. 
math:: + \begin{array}{rcl} + \Pi_{converter} &:& \left\{\begin{array}{cl} \forall n \in N \\ \forall v \in V^n_\downarrow \end{array} \right. &,& + \begin{array}{rcl} + \Gamma_v^\downarrow * \alpha^n_v &=& \Gamma_v^\uparrow + \end{array} + \end{array} \ No newline at end of file diff --git a/examples/Multi-Energies/Multi-Energies.ipynb b/examples/Multi-Energies/Multi-Energies.ipynb new file mode 100644 index 0000000..3acbadd --- /dev/null +++ b/examples/Multi-Energies/Multi-Energies.ipynb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6703aead548df1bf7ab37dd941ef97552fb77f8a67189e25b13b04f298a825d +size 3563273 diff --git a/examples/Multi-Energies/figure.drawio b/examples/Multi-Energies/figure.drawio new file mode 100644 index 0000000..f0af11d --- /dev/null +++ b/examples/Multi-Energies/figure.drawio @@ -0,0 +1 @@ +7VpNc5swEP01PqYDEmBzjJ24nc406UwOSXvpqCBjTTByhRzb/fWVQBiQsEMdY9wPX4xWYpHee4uWhQGcLDbvGVrOP9EQxwNghZsBvBkAYNuWJ/6kZZtbfMvKDREjoRpUGh7IT6yMxbAVCXFaG8gpjTlZ1o0BTRIc8JoNMUbX9WEzGtevukQRNgwPAYpN6yMJ+bxYl+eXHR8wiebq0iMwzDsWqBisVpLOUUjXFRO8HcAJo5TnR4vNBMcSvAKX/Lzpnt7dxBhOeJsTFl9TK3iOoo+L+y93/t34p//t6kp5eUHxSi34TlA4AF4sfI6/M3EUyaNHyp7VMvi2wEb4FzSIxng9Jxw/LFEge9ZCCcI254tYtGxxiNJlzs2MbLCYzlhdFTOON3uXY+9AEurCdIE524oh6gTPV7gqYUFXtdclTSNlmlcJUjakhBHtPJfYiQMF329ACQwoCxhnVCypipz3Y0WLjqs00/y1GADc5absLKCfFG7ErHJPud2gowcSRo5GgucaJLgNJLhdkQBb6vmasMuSMxzVkXRcp185Ox3J+fMFyxkMNRKKu3xfcnZbyvmexJctZ+D3LGfvv5wBtHqW87CBBA0mRldJKAG4sV5XbojSeTZWNlLO6PMuSXN2lgmNKcucQ8saTqdTSS2J48Ke0ASfCG+oib4hJQFeA+C20xXiozfL3h7VZS8y7QLP0iiRnc1mZngkmK+z1NGi8hbVLlQEA7xOdJ1JxVgDiSgmUSKagWAMC/tY8klENn+tOhYkDON998S69mriOoU6gFtXh2eqYyeEqjpgV+LwzxuODXw1RKgvnxA7CUfHNvegM4dj8VR7hnjMcDwQj0hmwMfG4x8eeg4wlXDe0LPNB+/eY286HVmnij1PTz0uIPbe/oDeLvZmswzHA7GX//3Lm+HQ0IeZmp45Is3SgcFFsGIvOxRwEl7LAqNEOEZpSoI6TXhD+FPl+IuE8p2rWjcbhWzW2KpGfkUcGjVJDVcxK7piAT6wHrcZ/5apP8Mx4uSlPo0mzNUVPlOShZCi13U1eoFGWz59dRaoFC01R0aVz9ccccQizA1Hghm0rQxbygHpgQnrhSx/qCkq91jqa4fpGyTXVGM5t+SEsNj2qdqonCWb5WlZqwOpqsjLqTyEV5
+SdvTakHWkpIeW5siD7yzolz+vG4Xrl7Vqrws6UrhZwJrQJNs5mFnFuk0iInYuPQRaF7Lqac+JdirjzuA07FSgQXedlbLsFmWUyp1CZQOXszN5LcO93x1Mo93R6Wwb7q62gzl6BrMnvk8Wgk0loK7VcgGbitNSZbBPlUGo31yc41QG9TxJd9S1ylrUkv5GlRWfJLymMtCnyob6u0XgH6cy/XWv4eholYlm+SVEPrz8ngTe/gI= \ No newline at end of file diff --git a/examples/Multi-Energies/figure.png b/examples/Multi-Energies/figure.png new file mode 100644 index 0000000..55027aa Binary files /dev/null and b/examples/Multi-Energies/figure.png differ diff --git a/examples/Network Investment/Network Investment.ipynb b/examples/Network Investment/Network Investment.ipynb index 3b8d1cb..175731d 100644 --- a/examples/Network Investment/Network Investment.ipynb +++ b/examples/Network Investment/Network Investment.ipynb @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78d3c267f9649cfa734c2b412387747861f17822060c3253910b924fa4af176c -size 8669490 +oid sha256:1410fd3904ebfb488b2e0b242d0a0ab233ff7fba34b80e4e0640d6a332ac7cde +size 8669491 diff --git a/examples/Storage/Storage.ipynb b/examples/Storage/Storage.ipynb new file mode 100644 index 0000000..ba165eb --- /dev/null +++ b/examples/Storage/Storage.ipynb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33de22c622c60a3f7bb2a32c717b149841c4ebe0917e04e73d2d843c8dd99ba1 +size 431508 diff --git a/examples/Storage/figure.drawio b/examples/Storage/figure.drawio new file mode 100644 index 0000000..ad2e540 --- /dev/null +++ b/examples/Storage/figure.drawio @@ -0,0 +1 @@ 
+1VnbctowEP0aHsPYkm88Arl0Juk0CdM0PHU0tgJqjEVlEUy/vjKWfJGdmDBcnCe0K3ktnT27qzU9OF4kNwwt599pgMMeMIKkBy97AJim4YifVLPJNAPDyBQzRgK5qFBMyD8slWrZigQ4rizklIacLKtKn0YR9nlFhxij6+qyFxpW37pEM1xTTHwU1rW/SMDn6lzOoJj4hslsLl/tATebWCC1WJ4knqOArksqeNWDY0Ypz0aLZIzDFDyFS/bc9Tuz+cYYjvguD4z4feI/MuKub8Hv26fwcjpjF9LKGwpX8sA94ITC3uiFCrNi13wjoXD+rqiauIi3jhqKBdBYJsWkGM3S36EyI/aTWcr0EorcKBB7FK4Uwmg9JxxPlshPZ9aCTUI354tQSKYYoniZ+feFJDjITZXPrw6DGcdJSSXxuMF0gTnbiCVyFigySnIKdmbyunC1J1XzspOlDklyzXLLBf5iIF3wCXeAI7lj9CXc4cCOuQM2uEMHLAqGaZoRkh+iOCZ+FSecEP5cGk/F2OjbUrpMT24oYSOFd6GM6Yr5uD2cOWIzzNt5hoNK8qs7pgS83QC80jEcIk7eqimzyRvyDfeUbNks/W5bVb9bukOzc8unyilONzTQDEHNUAZMzdCWHPmx9+eLZdUIcyNerXOG0VUUpEGz9XZLoKXhKsui6R4ozoAgYBknaA9qgebaDf62jxRoll0D7oqGBEedw850qtDlFDsbdE4NujuKgs4B55rV2DQH9pmBc2vATTj1X9MLYefA81w9ZA2rBt8gX3QSAFX237E8RjRK7xWxyMFc030AnqqfRc2clktmY/0UOLPNc1koVd1ULB7bSuq57dauSYqCnA1K0v5VWZWF1rJsdqosW65WTY19yzKoGoL2acuy3dTldIuoBTen6rJ4DqLaX5KoplZaoGrOP0tUoBuyTkzUpv7vWEQ1vyxNwY4sVVejjtDUhdUSDrw9aerphpwT0/RzffEBadrSOOdEvTD6htmrFH84aKPrVrrHjAh8MOsKh91ucdjw+sCzxG1eXA8sRxlQ+dI0+65pu8BzbWBDcWvdj+CurV93j0hx94H9jH8M794mj/jWW12/Pv15aPguGq38ECNWI/oBWoNaH9BAqXdbA+2TR/65udwXHKkraETubCVs19ygFTGjJSvskgC04GzNCB9Rbre717niH2if6gDcM8ShbsjwDhTgQiz+VMmWF39Nwav/ \ No newline at end of file diff --git a/examples/Storage/figure.png b/examples/Storage/figure.png new file mode 100644 index 0000000..73b1ae9 Binary files /dev/null and b/examples/Storage/figure.png differ diff --git a/examples/utils.py b/examples/utils.py index 8b466e0..ec0531f 100644 --- a/examples/utils.py +++ b/examples/utils.py @@ -101,7 +101,7 @@ def list_notebook(src: str) -> List[str]: @click.option('--export', nargs=1, help='export notebooks to directory given') def main(src: str, check: str, export: str): for name in list_notebook(src): - print(name, ':', end='') + print('{:30}'.format(name), ':', end='') nb = open_nb(name, src) nb = execute(nb, name, src) if check: diff --git a/hadar/__init__.py b/hadar/__init__.py index 71a5a6d..d710292 100644 --- 
a/hadar/__init__.py +++ b/hadar/__init__.py @@ -12,12 +12,12 @@ from .workflow.pipeline import RestrictedPlug, FreePlug, Stage, FocusStage, Drop, Rename, Fault, RepeatScenario, ToShuffler, Clip from .workflow.shuffler import Shuffler from .optimizer.input import Consumption, Link, Production, InputNode, Study -from .optimizer.output import OutputProduction, OutputNode, OutputLink, OutputConsumption, Result +from .optimizer.output import OutputProduction, OutputStorage, OutputNode, OutputLink, OutputConsumption, OutputNetwork, OutputConverter, Result from .optimizer.optimizer import LPOptimizer, RemoteOptimizer from .viewer.html import HTMLPlotting from .analyzer.result import ResultAnalyzer -__version__ = '0.3.1' +__version__ = '0.4.0' level = os.getenv('HADAR_LOG', 'WARNING') diff --git a/hadar/analyzer/result.py b/hadar/analyzer/result.py index 342c373..74a5b02 100644 --- a/hadar/analyzer/result.py +++ b/hadar/analyzer/result.py @@ -5,16 +5,15 @@ # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. from functools import reduce -from typing import Union, TypeVar, List, Generic, Type, Any, Dict +from typing import TypeVar, List, Generic, Type -import pandas as pd import numpy as np +import pandas as pd -from hadar.optimizer.output import Result, OutputNode from hadar.optimizer.input import Study +from hadar.optimizer.output import Result -__all__ = ['ResultAnalyzer', 'FluentAPISelector'] - +__all__ = ['ResultAnalyzer', 'NetworkFluentAPISelector'] T = TypeVar('T') @@ -23,6 +22,7 @@ class Index(Generic[T]): """ Generic Index to use to select and rank data. """ + def __init__(self, column, index=None): """ Initiate instance. 
@@ -65,30 +65,63 @@ def is_alone(self) -> bool: class ProdIndex(Index[str]): """Index implementation to filter productions""" + def __init__(self, index): Index.__init__(self, column='name', index=index) class ConsIndex(Index[str]): """ Index implementation to filter consumptions""" + + def __init__(self, index): + Index.__init__(self, column='name', index=index) + + +class StorIndex(Index[str]): + """ Index implementation to filter storage""" + def __init__(self, index): Index.__init__(self, column='name', index=index) class LinkIndex(Index[str]): """Index implementation to filter destination node""" + def __init__(self, index): Index.__init__(self, column='dest', index=index) +class SrcConverter(Index[str]): + """Index implementation to filter source converter""" + + def __init__(self, index): + Index.__init__(self, column='name', index=index) + + +class DestConverter(Index[str]): + """Index implementation to filter destination converter""" + + def __init__(self, index): + Index.__init__(self, column='name', index=index) + + class NodeIndex(Index[str]): - """Index implementation to filter name of elements""" + """Index implementation to filter node""" + def __init__(self, index): Index.__init__(self, column='node', index=index) +class NetworkIndex(Index[str]): + """Index implementation fo filter network""" + + def __init__(self, index): + Index.__init__(self, column='network', index=index) + + class IntIndex(Index[int]): """Index implementation to handle int index with slice""" + def __init__(self, column: str, index): """ Create instance. 
@@ -107,12 +140,14 @@ def __init__(self, column: str, index): class TimeIndex(IntIndex): """Index implementation to filter by time step""" + def __init__(self, index): IntIndex.__init__(self, column='t', index=index) class ScnIndex(IntIndex): """index implementation to filter by scenario""" + def __init__(self, index): IntIndex.__init__(self, column='scn', index=index) @@ -121,6 +156,7 @@ class ResultAnalyzer: """ Single object to encapsulate all postprocessing aggregation. """ + def __init__(self, study: Study, result: Result): """ Create an instance. @@ -133,34 +169,44 @@ def __init__(self, study: Study, result: Result): self.consumption = ResultAnalyzer._build_consumption(self.study, self.result) self.production = ResultAnalyzer._build_production(self.study, self.result) + self.storage = ResultAnalyzer._build_storage(self.study, self.result) self.link = ResultAnalyzer._build_link(self.study, self.result) + self.src_converter = ResultAnalyzer._build_src_converter(self.study, self.result) + self.dest_converter = ResultAnalyzer._build_dest_converter(self.study, self.result) @staticmethod def _build_consumption(study: Study, result: Result): """ Flat all data to build global consumption dataframe - columns: | cost | name | node | asked | given | t | + columns: | cost | name | node | network | asked | given | t | scn | """ + h = study.horizon scn = study.nb_scn - s = scn * h * sum([len(n.consumptions) for n in study.nodes.values()]) - cons = {'cost': np.empty(s), 'asked': np.empty(s), 'given': np.empty(s), - 'name': np.empty(s), 'node': np.empty(s), 't': np.empty(s), 'scn': np.empty(s)} + elements = sum([sum([len(n.consumptions) for n in net.nodes.values()]) for net in study.networks.values()]) + size = scn * h * elements + cons = {'cost': np.empty(size, dtype=float), 'asked': np.empty(size, dtype=float), + 'given': np.empty(size, dtype=float), + 'name': np.empty(size, dtype=str), 'node': np.empty(size, dtype=str), + 'network': np.empty(size, dtype=str), + 't': 
np.empty(size, dtype=float), 'scn': np.empty(size, dtype=float)} cons = pd.DataFrame(data=cons) n_cons = 0 - for n, name in enumerate(result.nodes.keys()): - for i, c in enumerate(result.nodes[name].consumptions): - slices = cons.index[n_cons * h * scn: (n_cons + 1) * h * scn] - cons.loc[slices, 'cost'] = c.cost - cons.loc[slices, 'name'] = c.name - cons.loc[slices, 'node'] = name - cons.loc[slices, 'asked'] = study.nodes[name].consumptions[i].quantity.flatten() - cons.loc[slices, 'given'] = c.quantity.flatten() - cons.loc[slices, 't'] = np.tile(np.arange(h), scn) - cons.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) - - n_cons += 1 + for n, net in result.networks.items(): + for node in net.nodes.keys(): + for i, c in enumerate(net.nodes[node].consumptions): + slices = cons.index[n_cons * h * scn: (n_cons + 1) * h * scn] + cons.loc[slices, 'cost'] = c.cost.flatten() + cons.loc[slices, 'name'] = c.name + cons.loc[slices, 'node'] = node + cons.loc[slices, 'network'] = n + cons.loc[slices, 'asked'] = study.networks[n].nodes[node].consumptions[i].quantity.flatten() + cons.loc[slices, 'given'] = c.quantity.flatten() + cons.loc[slices, 't'] = np.tile(np.arange(h), scn) + cons.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) + + n_cons += 1 return cons @@ -168,31 +214,85 @@ def _build_consumption(study: Study, result: Result): def _build_production(study: Study, result: Result): """ Flat all data to build global production dataframe - columns: | cost | avail | used | name | node | t | + columns: | cost | avail | used | network | name | node | t | """ h = study.horizon scn = study.nb_scn - s = scn * h * sum([len(n.productions) for n in result.nodes.values()]) - prod = {'cost': np.empty(s), 'avail': np.empty(s), 'used': np.empty(s), - 'name': np.empty(s), 'node': np.empty(s), 't': np.empty(s), 'scn': np.empty(s)} + elements = sum([sum([len(n.productions) for n in net.nodes.values()]) for net in study.networks.values()]) + size = scn * h * elements + prod = {'cost': 
np.empty(size, dtype=float), 'avail': np.empty(size, dtype=float), + 'used': np.empty(size, dtype=float), + 'name': np.empty(size, dtype=str), 'node': np.empty(size, dtype=str), + 'network': np.empty(size, dtype=str), + 't': np.empty(size, dtype=float), 'scn': np.empty(size, dtype=float)} prod = pd.DataFrame(data=prod) n_prod = 0 - for n, name in enumerate(result.nodes.keys()): - for i, c in enumerate(result.nodes[name].productions): - slices = prod.index[n_prod * h * scn: (n_prod + 1) * h * scn] - prod.loc[slices, 'cost'] = c.cost - prod.loc[slices, 'name'] = c.name - prod.loc[slices, 'node'] = name - prod.loc[slices, 'avail'] = study.nodes[name].productions[i].quantity.flatten() - prod.loc[slices, 'used'] = c.quantity.flatten() - prod.loc[slices, 't'] = np.tile(np.arange(h), scn) - prod.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) - - n_prod += 1 + for n, net in result.networks.items(): + for node in net.nodes.keys(): + for i, c in enumerate(net.nodes[node].productions): + slices = prod.index[n_prod * h * scn: (n_prod + 1) * h * scn] + prod.loc[slices, 'cost'] = c.cost.flatten() + prod.loc[slices, 'name'] = c.name + prod.loc[slices, 'node'] = node + prod.loc[slices, 'network'] = n + prod.loc[slices, 'avail'] = study.networks[n].nodes[node].productions[i].quantity.flatten() + prod.loc[slices, 'used'] = c.quantity.flatten() + prod.loc[slices, 't'] = np.tile(np.arange(h), scn) + prod.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) + + n_prod += 1 return prod + @staticmethod + def _build_storage(study: Study, result: Result): + """ + Flat all data to build global storage dataframe + :param study: + :param result: + :return: + """ + h = study.horizon + scn = study.nb_scn + elements = sum([sum([len(n.storages) for n in net.nodes.values()]) for net in study.networks.values()]) + size = h * scn * elements + + stor = {'max_capacity': np.empty(size, dtype=float), 'capacity': np.empty(size, dtype=float), + 'max_flow_in': np.empty(size, dtype=float), 'flow_in': 
np.empty(size, dtype=float), + 'max_flow_out': np.empty(size, dtype=float), 'flow_out': np.empty(size, dtype=float), + 'cost': np.empty(size, dtype=float), + 'init_capacity': np.empty(size, dtype=float), 'eff': np.empty(size, dtype=float), + 'name': np.empty(size, dtype=str), 'node': np.empty(size, dtype=str), + 'network': np.empty(size, dtype=str), + 't': np.empty(size, dtype=float), 'scn': np.empty(size, dtype=float)} + + stor = pd.DataFrame(data=stor) + + n_stor = 0 + for n, net in result.networks.items(): + for node in net.nodes.keys(): + for i, c in enumerate(net.nodes[node].storages): + slices = stor.index[n_stor * h * scn: (n_stor + 1) * h * scn] + study_stor = study.networks[n].nodes[node].storages[i] + + stor.loc[slices, 'max_capacity'] = study_stor.capacity + stor.loc[slices, 'capacity'] = c.capacity.flatten() + stor.loc[slices, 'max_flow_in'] = study_stor.flow_in + stor.loc[slices, 'flow_in'] = c.flow_in.flatten() + stor.loc[slices, 'max_flow_out'] = study_stor.flow_out + stor.loc[slices, 'flow_out'] = c.flow_out.flatten() + stor.loc[slices, 'cost'] = study_stor.cost + stor.loc[slices, 'init_capacity'] = study_stor.init_capacity + stor.loc[slices, 'eff'] = study_stor.eff + stor.loc[slices, 'network'] = n + stor.loc[slices, 'name'] = c.name + stor.loc[slices, 'node'] = node + stor.loc[slices, 't'] = np.tile(np.arange(h), scn) + stor.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) + + return stor + @staticmethod def _build_link(study: Study, result: Result): """ @@ -201,27 +301,96 @@ def _build_link(study: Study, result: Result): """ h = study.horizon scn = study.nb_scn - s = h * scn * sum([len(n.links) for n in result.nodes.values()]) - link = {'cost': np.empty(s), 'avail': np.empty(s), 'used': np.empty(s), - 'node': np.empty(s), 'dest': np.empty(s), 't': np.empty(s), 'scn': np.empty(s)} + elements = sum([sum([len(n.links) for n in net.nodes.values()]) for net in study.networks.values()]) + size = h * scn * elements + + link = {'cost': np.empty(size, 
dtype=float), 'avail': np.empty(size, dtype=float), + 'used': np.empty(size, dtype=float), + 'node': np.empty(size, dtype=str), 'dest': np.empty(size, dtype=str), + 'network': np.empty(size, dtype=str), + 't': np.empty(size, dtype=float), 'scn': np.empty(size, dtype=float)} link = pd.DataFrame(data=link) n_link = 0 - for n, name in enumerate(result.nodes.keys()): - for i, c in enumerate(result.nodes[name].links): - slices = link.index[n_link * h * scn: (n_link + 1) * h * scn] - link.loc[slices, 'cost'] = c.cost - link.loc[slices, 'dest'] = c.dest - link.loc[slices, 'node'] = name - link.loc[slices, 'avail'] = study.nodes[name].links[i].quantity.flatten() - link.loc[slices, 'used'] = c.quantity.flatten() - link.loc[slices, 't'] = np.tile(np.arange(h), scn) - link.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) - - n_link += 1 + for n, net in result.networks.items(): + for node in net.nodes.keys(): + for i, c in enumerate(net.nodes[node].links): + slices = link.index[n_link * h * scn: (n_link + 1) * h * scn] + link.loc[slices, 'cost'] = c.cost.flatten() + link.loc[slices, 'dest'] = c.dest + link.loc[slices, 'node'] = node + link.loc[slices, 'network'] = n + link.loc[slices, 'avail'] = study.networks[n].nodes[node].links[i].quantity.flatten() + link.loc[slices, 'used'] = c.quantity.flatten() + link.loc[slices, 't'] = np.tile(np.arange(h), scn) + link.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) + + n_link += 1 return link + @staticmethod + def _build_dest_converter(study: Study, result: Result): + h = study.horizon + scn = study.nb_scn + elements = sum([len(v.src_ratios) for v in study.converters.values()]) + size = h * scn * elements + + dest_conv = {'name': np.empty(size, dtype=str), 'network': np.empty(size, dtype=str), + 'node': np.empty(size, dtype=str), 'flow': np.empty(size, dtype=float), + 'cost': np.empty(size, dtype=float), 'max': np.empty(size, dtype=float)} + dest_conv = pd.DataFrame(data=dest_conv) + + for i, (name, v) in 
enumerate(study.converters.items()): + slices = dest_conv.index[i * h * scn: (i + 1) * h * scn] + dest_conv.loc[slices, 'name'] = v.name + dest_conv.loc[slices, 'cost'] = v.cost + dest_conv.loc[slices, 'max'] = v.max + dest_conv.loc[slices, 'network'] = v.dest_network + dest_conv.loc[slices, 'node'] = v.dest_node + dest_conv.loc[slices, 'flow'] = result.converters[name].flow_dest.flatten() + dest_conv.loc[slices, 't'] = np.tile(np.arange(h), scn) + dest_conv.loc[slices, 'scn'] = np.repeat(np.arange(scn), h) + + return dest_conv + + @staticmethod + def _build_src_converter(study: Study, result: Result): + h = study.horizon + scn = study.nb_scn + elements = sum([len(v.src_ratios) for v in study.converters.values()]) + size = h * scn * elements + + src_conv = {'name': np.empty(size, dtype=str), 'network': np.empty(size, dtype=str), + 'node': np.empty(size, dtype=str), 'ratio': np.empty(size, dtype=float), + 'flow': np.empty(size, dtype=float), 'max': np.empty(size, dtype=float)} + src_conv = pd.DataFrame(data=src_conv) + + s = 0 + for name, v in study.converters.items(): + src_size = len(v.src_ratios) + e = s + h * scn * src_size + slices = src_conv.index[s:e] + src_conv.loc[slices, 'name'] = v.name + src_conv.loc[slices, 'max'] = v.max + src_conv.loc[slices, 't'] = np.tile(np.arange(h), scn * src_size) + src_conv.loc[slices, 'scn'] = np.repeat(np.arange(scn), h * src_size) + + for i_src, (net, node) in enumerate(v.src_ratios.keys()): + e = s + h * scn * (i_src + 1) + slices = src_conv.index[s:e] + src_conv.loc[slices, 'network'] = net + src_conv.loc[slices, 'node'] = node + src_conv.loc[slices, 'ratio'] = v.src_ratios[(net, node)] + src_conv.loc[slices, 'flow'] = result.converters[name].flow_src[(net, node)].flatten() + s = e + s = e + + src_conv.loc[:, 'max'] /= src_conv[ + 'ratio'] # max value is for output. 
Need to divide by ratio to find max for src + + return src_conv + @staticmethod def _remove_useless_index_level(df: pd.DataFrame, indexes: List[Index]) -> pd.DataFrame: """ @@ -247,8 +416,8 @@ def _pivot(indexes, df: pd.DataFrame) -> pd.DataFrame: :return: pivot table """ names = [i.column for i in indexes] - filtered = reduce(lambda a, b: a & b, (i.filter(df) for i in indexes)) - pt = pd.pivot_table(data=df[filtered], index=names, aggfunc=lambda x: x.iloc[0]) + mask = reduce(lambda a, b: a & b, (i.filter(df) for i in indexes)) + pt = pd.pivot_table(data=df[mask], index=names, aggfunc=lambda x: x.iloc[0]) return ResultAnalyzer._remove_useless_index_level(df=pt, indexes=indexes) @@ -274,12 +443,13 @@ def _assert_index(indexes: List[Index], type: Type): if not ResultAnalyzer.check_index(indexes, type): raise ValueError('Indexes must contain a {}'.format(type.__class__.__name__)) - def start(self, indexes: List[Index]) -> pd.DataFrame: + def filter(self, indexes: List[Index]) -> pd.DataFrame: """ Aggregate according to index level and filter. 
""" ResultAnalyzer._assert_index(indexes, TimeIndex) ResultAnalyzer._assert_index(indexes, NodeIndex) + ResultAnalyzer._assert_index(indexes, NetworkIndex) ResultAnalyzer._assert_index(indexes, ScnIndex) if ResultAnalyzer.check_index(indexes, ConsIndex): @@ -288,96 +458,134 @@ def start(self, indexes: List[Index]) -> pd.DataFrame: if ResultAnalyzer.check_index(indexes, ProdIndex): return ResultAnalyzer._pivot(indexes, self.production) + if ResultAnalyzer.check_index(indexes, StorIndex): + return ResultAnalyzer._pivot(indexes, self.storage) + if ResultAnalyzer.check_index(indexes, LinkIndex): return ResultAnalyzer._pivot(indexes, self.link) - def network(self): - return FluentAPISelector([], self) + if ResultAnalyzer.check_index(indexes, SrcConverter): + return ResultAnalyzer._pivot(indexes, self.src_converter) + + if ResultAnalyzer.check_index(indexes, DestConverter): + return ResultAnalyzer._pivot(indexes, self.dest_converter) + + def network(self, name='default'): + """ + Entry point for fluent api + :param name: network name. 'default' as default + :return: Fluent API Selector + """ + return NetworkFluentAPISelector([NetworkIndex(index=name)], self) - def get_elements_inside(self, node: str): + def get_elements_inside(self, node: str, network: str = 'default'): """ Get numbers of elements by node. 
+ :param network: network name :param node: node name - :return: (nb of consumptions, nb of productions, nb of links (export)) + :return: (nb of consumptions, nb of productions, nb of storages, nb of links (export), nb of converters (export), nb of converters (import)) """ - return len(self.result.nodes[node].consumptions),\ - len(self.result.nodes[node].productions),\ - len(self.result.nodes[node].links) + n = self.study.networks[network].nodes[node] + return len(n.consumptions), \ + len(n.productions), \ + len(n.storages), \ + len(n.links), \ + sum((network, node) in conv.src_ratios for conv in self.study.converters.values()), \ + sum((network == conv.dest_network) and (node == conv.dest_node) for conv in self.study.converters.values()) - def get_balance(self, node: str) -> np.ndarray: + def get_balance(self, node: str, network: str = 'default') -> np.ndarray: """ Compute balance over time on asked node. :param node: node asked + :param network: network asked. Default is 'default' :return: timeline array with balance exchanges value """ balance = np.zeros((self.nb_scn, self.study.horizon)) - im = pd.pivot_table(self.link[self.link['dest'] == node][['used', 'scn', 't']], index=['scn', 't'], aggfunc=np.sum) + mask = (self.link['dest'] == node) & (self.link['network'] == network) + im = pd.pivot_table(self.link[mask][['used', 'scn', 't']], index=['scn', 't'], aggfunc=np.sum) if im.size > 0: balance += -im['used'].values.reshape(self.nb_scn, self.horizon) - exp = pd.pivot_table(self.link[self.link['node'] == node][['used', 'scn', 't']], index=['scn', 't'], aggfunc=np.sum) + mask = (self.link['node'] == node) & (self.link['network'] == network) + exp = pd.pivot_table(self.link[mask][['used', 'scn', 't']], index=['scn', 't'], aggfunc=np.sum) if exp.size > 0: balance += exp['used'].values.reshape(self.nb_scn, self.horizon) return balance - def get_cost(self, node: str) -> np.ndarray: + def get_cost(self, node: str, network: str = 'default') -> np.ndarray: """ Compute
adequacy cost on a node. :param node: node name + :param network: network name, 'default' as default :return: matrix (scn, time) """ - cost = np.zeros((self.nb_scn, self.horizon)) - c, p, b = self.get_elements_inside(node) + cost = np.zeros((self.nb_scn, self.horizon)) + c, p, s, l, _, v = self.get_elements_inside(node, network) if c: - cons = self.network().node(node).scn().time().consumption() + cons = self.network(network).node(node).scn().time().consumption() cost += ((cons['asked'] - cons['given']) * cons['cost']).groupby(axis=0, level=(0, 1)) \ .sum().sort_index(level=(0, 1)).values.reshape(self.nb_scn, self.horizon) if p: - prod = self.network().node(node).scn().time().production() + prod = self.network(network).node(node).scn().time().production() cost += (prod['used'] * prod['cost']).groupby(axis=0, level=(0, 1)) \ .sum().sort_index(level=(0, 1)).values.reshape(self.nb_scn, self.horizon) - if b: - link = self.network().node(node).scn().time().link() + if s: + stor = self.network(network).node(node).scn().time().storage() + cost += (stor['capacity'] * stor['cost']).groupby(axis=0, level=(0, 1)) \ + .sum().sort_index(level=(0, 1)).values.reshape(self.nb_scn, self.horizon) + + if l: + link = self.network(network).node(node).scn().time().link() cost += (link['used'] * link['cost']).groupby(axis=0, level=(0, 1)) \ .sum().sort_index(level=(0, 1)).values.reshape(self.nb_scn, self.horizon) + if v: + conv = self.network(network).node(node).scn().time().from_converter() + cost += (conv['flow'] * conv['cost']).groupby(axis=0, level=(0, 1)) \ + .sum().sort_index(level=(0, 1)).values.reshape(self.nb_scn, self.horizon) + return cost - def get_rac(self) -> np.ndarray: + def get_rac(self, network='default') -> np.ndarray: """ Compute Remain Availabale Capacities on network. + :param network: select the network to compute. Default is 'default'.
:return: matrix (scn, time) """ - prod_used = self.production\ - .drop(['avail', 'cost'], axis=1)\ - .pivot_table(index='scn', columns='t', aggfunc=np.sum)\ + def fill_width_zeros(arr: np.ndarray) -> np.ndarray: + return np.zeros((self.nb_scn, self.horizon)) if arr.size == 0 else arr + prod_used = self.production[self.production['network'] == network] \ + .drop(['avail', 'cost'], axis=1) \ + .pivot_table(index='scn', columns='t', aggfunc=np.sum) \ .values + prod_used = fill_width_zeros(prod_used) - prod_avail = self.production\ - .drop(['used', 'cost'], axis=1)\ - .pivot_table(index='scn', columns='t', aggfunc=np.sum)\ + prod_avail = self.production[self.production['network'] == network] \ + .drop(['used', 'cost'], axis=1) \ + .pivot_table(index='scn', columns='t', aggfunc=np.sum) \ .values + prod_avail = fill_width_zeros(prod_avail) - cons_asked = self.consumption\ - .drop(['given', 'cost'], axis=1)\ - .pivot_table(index='scn', columns='t', aggfunc=np.sum)\ + cons_asked = self.consumption[self.consumption['network'] == network] \ + .drop(['given', 'cost'], axis=1) \ + .pivot_table(index='scn', columns='t', aggfunc=np.sum) \ .values + cons_asked = fill_width_zeros(cons_asked) - cons_given = self.consumption\ - .drop(['asked', 'cost'], axis=1)\ - .pivot_table(index='scn', columns='t', aggfunc=np.sum)\ + cons_given = self.consumption[self.consumption['network'] == network] \ + .drop(['asked', 'cost'], axis=1) \ + .pivot_table(index='scn', columns='t', aggfunc=np.sum) \ .values + cons_given = fill_width_zeros(cons_given) - rac = (prod_avail - prod_used) - (cons_asked - cons_given) - - return rac + return (prod_avail - prod_used) - (cons_asked - cons_given) @property def horizon(self) -> int: @@ -397,19 +605,19 @@ def nb_scn(self) -> int: """ return self.study.nb_scn - @property - def nodes(self) -> List[str]: + def nodes(self, network: str = 'default') -> List[str]: """ Shortcut to get list of node names + :param network: network selected :return: nodes name """ - 
return self.result.nodes.keys() + return list(self.result.networks[network].nodes.keys()) -class FluentAPISelector: +class NetworkFluentAPISelector: """ - Fluent Api Selector for Analyzer. + Fluent Api Selector to analyze network element. User can join network, node, consumption, production, link, time, scn to create filter and organize hierarchy. Join can me in any order, except: @@ -417,24 +625,24 @@ class FluentAPISelector: - join is unique only one element of node, time, scn are expected for each query - production, consumption and link are excluded themself, only on of them are expected for each query """ + FULL_DESCRIPTION = 5 # Need 5 indexes to describe completely a query + def __init__(self, indexes: List[Index], analyzer: ResultAnalyzer): self.indexes = indexes self.analyzer = analyzer if not ResultAnalyzer.check_index(indexes, ConsIndex) \ and not ResultAnalyzer.check_index(indexes, ProdIndex) \ - and not ResultAnalyzer.check_index(indexes, LinkIndex): + and not ResultAnalyzer.check_index(indexes, StorIndex) \ + and not ResultAnalyzer.check_index(indexes, LinkIndex) \ + and not ResultAnalyzer.check_index(indexes, SrcConverter) \ + and not ResultAnalyzer.check_index(indexes, DestConverter): self.consumption = lambda x=None: self._append(ConsIndex(x)) - - if not ResultAnalyzer.check_index(indexes, ProdIndex) \ - and not ResultAnalyzer.check_index(indexes, ConsIndex) \ - and not ResultAnalyzer.check_index(indexes, LinkIndex): self.production = lambda x=None: self._append(ProdIndex(x)) - - if not ResultAnalyzer.check_index(indexes, LinkIndex) \ - and not ResultAnalyzer.check_index(indexes, ConsIndex) \ - and not ResultAnalyzer.check_index(indexes, ProdIndex): self.link = lambda x=None: self._append(LinkIndex(x)) + self.storage = lambda x=None: self._append(StorIndex(x)) + self.to_converter = lambda x=None: self._append(SrcConverter(x)) + self.from_converter = lambda x=None: self._append(DestConverter(x)) if not ResultAnalyzer.check_index(indexes, NodeIndex): 
self.node = lambda x=None: self._append(NodeIndex(x)) @@ -445,6 +653,9 @@ def __init__(self, indexes: List[Index], analyzer: ResultAnalyzer): if not ResultAnalyzer.check_index(indexes, ScnIndex): self.scn = lambda x=None: self._append(ScnIndex(x)) + def _find_index_by_type(self, type: Type): + return [i for i in self.indexes if isinstance(i, type)][0] + def _append(self, index: Index): """ Decide what to do between finish query and start analyze or resume query @@ -453,7 +664,7 @@ def _append(self, index: Index): :return: """ self.indexes.append(index) - if len(self.indexes) == 4: - return self.analyzer.start(self.indexes) + if len(self.indexes) == NetworkFluentAPISelector.FULL_DESCRIPTION: + return self.analyzer.filter(self.indexes) else: - return FluentAPISelector(self.indexes, self.analyzer) \ No newline at end of file + return NetworkFluentAPISelector(self.indexes, self.analyzer) diff --git a/hadar/optimizer/input.py b/hadar/optimizer/input.py index 0f4d4c0..21ffb46 100644 --- a/hadar/optimizer/input.py +++ b/hadar/optimizer/input.py @@ -4,13 +4,16 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. 
- -from typing import List, Union, Dict +from abc import ABC, abstractmethod +from copy import deepcopy +from typing import List, Union, Dict, Tuple import numpy as np +__all__ = ['Consumption', 'Link', 'Production', 'Storage', 'Converter', 'InputNetwork', 'InputNode', 'Study', + 'NetworkFluentAPISelector', 'NodeFluentAPISelector'] -__all__ = ['Consumption', 'Link', 'Production', 'InputNode', 'Study', 'NetworkFluentAPISelector', 'NodeFluentAPISelector'] +import hadar class DTO: @@ -30,12 +33,34 @@ def __repr__(self): return self.__str__() -class Consumption(DTO): +class JSON(DTO, ABC): + + def to_json(self): + def convert(value): + if isinstance(value, JSON): + return value.to_json() + elif isinstance(value, dict): + return {k: convert(v) for k, v in value.items()} + elif isinstance(value, list) or isinstance(value, tuple): + return [convert(v) for v in value] + elif isinstance(value, np.ndarray): + return value.tolist() + return value + + return {k: convert(v) for k, v in self.__dict__.items()} + + @staticmethod + @abstractmethod + def from_json(dict): + pass + + +class Consumption(JSON): """ Consumption element. """ - def __init__(self, quantity: Union[List, np.ndarray, float], cost: int = 0, name: str = ''): + def __init__(self, quantity: Union[List, np.ndarray, float], cost: Union[List, np.ndarray, float], name: str = ''): """ Create consumption. 
@@ -43,16 +68,20 @@ def __init__(self, quantity: Union[List, np.ndarray, float], cost: int = 0, name :param cost: cost of unavailability :param name: name of consumption (unique for each node) """ - self.cost = cost + self.cost = np.array(cost) self.quantity = np.array(quantity) self.name = name + @staticmethod + def from_json(dict): + return Consumption(**dict) + -class Production(DTO): +class Production(JSON): """ Production element """ - def __init__(self, quantity: Union[List, np.ndarray, float], cost: int = 0, name: str = 'in'): + def __init__(self, quantity: Union[List, np.ndarray, float], cost: Union[List, np.ndarray, float], name: str = 'in'): """ Create production @@ -61,15 +90,48 @@ def __init__(self, quantity: Union[List, np.ndarray, float], cost: int = 0, name :param name: name of production (unique for each node) """ self.name = name - self.cost = cost + self.cost = np.array(cost) self.quantity = np.array(quantity) + @staticmethod + def from_json(dict): + return Production(**dict) -class Link(DTO): + +class Storage(JSON): + """ + Storage element + """ + def __init__(self, name, capacity: int, flow_in: float, flow_out: float, cost: float = 0, + init_capacity: int = 0, eff: float = 0.99): + """ + Create storage. + + :param capacity: maximum storage capacity (like of many quantity to use inside storage) + :param flow_in: max flow into storage during on time step + :param flow_out: max flow out storage during on time step + :param cost: unit cost of storage at each time-step. default 0 + :param init_capacity: initial capacity level. default 0 + :param eff: storage efficient (applied on input flow stored). 
default 0.99 + """ + self.name = name + self.capacity = capacity + self.flow_in = flow_in + self.flow_out = flow_out + self.cost = cost + self.init_capacity = init_capacity + self.eff = eff + + @staticmethod + def from_json(dict): + return Storage(**dict) + + +class Link(JSON): """ Link element """ - def __init__(self, dest: str, quantity: Union[List, np.ndarray, float], cost: int = 0): + def __init__(self, dest: str, quantity: Union[List, np.ndarray, float], cost: Union[List, np.ndarray, float]): """ Create link. @@ -79,124 +141,249 @@ def __init__(self, dest: str, quantity: Union[List, np.ndarray, float], cost: in """ self.dest = dest self.quantity = np.array(quantity) - self.cost = cost + self.cost = np.array(cost) + + @staticmethod + def from_json(dict): + return Link(**dict) -class InputNode(DTO): +class Converter(JSON): + """ + Converter element + """ + def __init__(self, name: str, src_ratios: Dict[Tuple[str, str], float], dest_network: str, dest_node: str, + cost: float, max: float,): + """ + Create converter. + + :param name: converter name + + :param src_ratios: ratio of conversion for each source. data={(network, node): ratio} + :param dest_network: destination network + :param dest_node: destination node + :param cost: cost applied on quantity through converter + :param max: max output flow + """ + self.name = name + self.src_ratios = src_ratios + self.dest_network = dest_network + self.dest_node = dest_node + self.cost = cost + self.max = max + + def to_json(self) -> dict: + dict = deepcopy(self.__dict__) + # src_ratios has a tuple of two strings as key. This is forbidden by JSON. + # Therefore when serialized we join these two strings with '::' to create one string as key + # Ex: ('elec', 'a') --> 'elec::a' + dict['src_ratios'] = {'::'.join(k): v for k, v in self.src_ratios.items()} + return dict + + @staticmethod + def from_json(dict: dict): + # When deserializing, we need to split the key string of src_ratios.
+ # JSON doesn't accept tuple as key, so two string was joined for serialization + # Ex: 'elec::a' -> ('elec', 'a') + dict['src_ratios'] = {tuple(k.split('::')): v for k, v in dict['src_ratios'].items()} + return Converter(**dict) + + +class InputNode(JSON): """ Node element """ - def __init__(self, consumptions: List[Consumption], productions: List[Production], links: List[Link]): + def __init__(self, consumptions: List[Consumption], productions: List[Production], + storages: List[Storage], links: List[Link]): """ Create node element. :param consumptions: list of consumptions inside node :param productions: list of productions inside node + :param storages: list of storages inside node :param links: list of links inside node """ self.consumptions = consumptions self.productions = productions + self.storages = storages self.links = links + @staticmethod + def from_json(dict): + dict['consumptions'] = [Consumption.from_json(v) for v in dict['consumptions']] + dict['productions'] = [Production.from_json(v) for v in dict['productions']] + dict['storages'] = [Storage.from_json(v) for v in dict['storages']] + dict['links'] = [Link.from_json(v) for v in dict['links']] + return InputNode(**dict) + + +class InputNetwork(JSON): + """ + Network element + """ + def __init__(self, nodes: Dict[str, InputNode] = None): + """ + Create network element + + :param nodes: nodes list inside network + """ + self.nodes = nodes if nodes else {} -class Study(DTO): + @staticmethod + def from_json(dict): + dict['nodes'] = {k: InputNode.from_json(v) for k, v in dict['nodes'].items()} + return InputNetwork(**dict) + + +class Study(JSON): """ Main object to facilitate to build a study """ - def __init__(self, horizon: int, nb_scn: int = 1): + def __init__(self, horizon: int, nb_scn: int = 1, version: str = None): """ Instance study. :param horizon: simulation time horizon (i.e. number of time step in simulation) :param nb_scn: number of scenarios in study. Default is 1. 
""" - - self.nodes = dict() + self.version = version or hadar.__version__ + self.networks = dict() + self.converters = dict() self.horizon = horizon self.nb_scn = nb_scn - def network(self): + @staticmethod + def from_json(dict): + dict = deepcopy(dict) + study = Study(horizon=dict['horizon'], nb_scn=dict['nb_scn'], version=dict['version']) + study.networks = {k: InputNetwork.from_json(v) for k, v in dict['networks'].items()} + study.converters = {k: Converter.from_json(v) for k, v in dict['converters'].items()} + return study + + def network(self, name='default'): """ Entry point to create study with the fluent api. :return: """ - return NetworkFluentAPISelector(study=self) + self.add_network(name) + return NetworkFluentAPISelector(selector={'network': name}, study=self) - def add_link(self, src: str, dest: str, cost: int, quantity: Union[List[float], np.ndarray, float]): + def add_link(self, network: str, src: str, dest: str, cost: int, quantity: Union[List[float], np.ndarray, float]): """ Add a link inside network. 
+ :param network: network where nodes belong :param src: source node name :param dest: destination node name :param cost: cost of use :param quantity: transfer capacity :return: """ - if cost < 0: - raise ValueError('link cost must be positive') - if src not in self.nodes.keys(): + if src not in self.networks[network].nodes.keys(): raise ValueError('link source must be a valid node') - if dest not in self.nodes.keys(): + if dest not in self.networks[network].nodes.keys(): raise ValueError('link destination must be a valid node') - if dest in [l.dest for l in self.nodes[src].links]: + if dest in [l.dest for l in self.networks[network].nodes[src].links]: raise ValueError('link destination must be unique on a node') - quantity = self._validate_quantity(quantity) - self.nodes[src].links.append(Link(dest=dest, quantity=quantity, cost=cost)) + quantity = self._standardize_array(quantity) + if np.any(quantity < 0): + raise ValueError('Link quantity must be positive') + + cost = self._standardize_array(cost) + self.networks[network].nodes[src].links.append(Link(dest=dest, quantity=quantity, cost=cost)) return self - def add_node(self, node): - if node not in self.nodes.keys(): - self.nodes[node] = InputNode(consumptions=[], productions=[], links=[]) + def add_network(self, network: str): + if network not in self.networks.keys(): + self.networks[network] = InputNetwork() + + def add_node(self, network: str, node: str): + if node not in self.networks[network].nodes.keys(): + self.networks[network].nodes[node] = InputNode(consumptions=[], productions=[], links=[], storages=[]) - def _add_production(self, node: str, prod: Production): - if prod.cost < 0: - raise ValueError('production cost must be positive') - if prod.name in [p.name for p in self.nodes[node].productions]: + def _add_production(self, network: str, node: str, prod: Production): + if prod.name in [p.name for p in self.networks[network].nodes[node].productions]: raise ValueError('production name must be unique on 
a node') - prod.quantity = self._validate_quantity(prod.quantity) - self.nodes[node].productions.append(prod) + prod.quantity = self._standardize_array(prod.quantity) + if np.any(prod.quantity < 0): + raise ValueError('Production quantity must be positive') + + prod.cost = self._standardize_array(prod.cost) + self.networks[network].nodes[node].productions.append(prod) - def _add_consumption(self, node: str, cons: Consumption): - if cons.cost < 0: - raise ValueError('consumption cost must be positive') - if cons.name in [c.name for c in self.nodes[node].consumptions]: + def _add_consumption(self, network: str, node: str, cons: Consumption): + if cons.name in [c.name for c in self.networks[network].nodes[node].consumptions]: raise ValueError('consumption name must be unique on a node') - cons.quantity = self._validate_quantity(cons.quantity) - self.nodes[node].consumptions.append(cons) + cons.quantity = self._standardize_array(cons.quantity) + if np.any(cons.quantity < 0): + raise ValueError('Consumption quantity must be positive') - def _validate_quantity(self, quantity: Union[List[float], np.ndarray, float]) -> np.ndarray: - quantity = np.array(quantity) + cons.cost = self._standardize_array(cons.cost) + self.networks[network].nodes[node].consumptions.append(cons) - # If quantity are negative raise error: - if np.any(quantity < 0): - raise ValueError('Quantity must be positive') + def _add_storage(self, network: str, node: str, store: Storage): + if store.name in [s.name for s in self.networks[network].nodes[node].storages]: + raise ValueError('storage name must be unique on a node') + if store.flow_in < 0 or store.flow_out < 0: + raise ValueError('storage flow must be positive') + if store.capacity < 0 or store.init_capacity < 0: + raise ValueError('storage capacities must be positive') + if store.eff < 0 or store.eff > 1: + raise ValueError('storage efficiency must be in ]0, 1[') + + self.networks[network].nodes[node].storages.append(store) + + def 
_add_converter(self, name: str): + if name not in [v for v in self.converters]: + self.converters[name] = Converter(name=name, src_ratios={}, dest_network='', + dest_node='', cost=0, max=0) + + def _add_converter_src(self, name: str, network: str, node: str, ratio: float): + if (network, node) in self.converters[name].src_ratios: + raise ValueError('converter input already has node %s on network %s' % (node, network)) + + self.converters[name].src_ratios[(network, node)] = ratio + + def _set_converter_dest(self, name: str, network: str, node: str, cost: float, max: float): + if self.converters[name].dest_network and self.converters[name].dest_node: + raise ValueError('converter has already output set') + if network not in self.networks or node not in self.networks[network].nodes.keys(): + raise ValueError('Node %s is not present in network %s' % (node, network)) + + self.converters[name].dest_network = network + self.converters[name].dest_node = node + self.converters[name].cost = cost + self.converters[name].max = max + + def _standardize_array(self, array: Union[List[float], np.ndarray, float]) -> np.ndarray: + array = np.array(array, dtype=float) # If scenario and horizon are not provided, expend on both side - if quantity.size == 1: - return np.ones((self.nb_scn, self.horizon)) * quantity + if array.size == 1: + return np.ones((self.nb_scn, self.horizon)) * array # If scenario are not provided copy timeseries for each scenario - if quantity.shape == (self.horizon,): - return np.tile(quantity, (self.nb_scn, 1)) + if array.shape == (self.horizon,): + return np.tile(array, (self.nb_scn, 1)) # If horizon are not provide extend each scenario to full horizon - if quantity.shape == (self.nb_scn, 1): - return np.tile(quantity, self.horizon) + if array.shape == (self.nb_scn, 1): + return np.tile(array, self.horizon) # If perfect size - if quantity.shape == (self.nb_scn, self.horizon): - return quantity + if array.shape == (self.nb_scn, self.horizon): + return array # If 
any size pattern matches, raise error on quantity size given - horizon_given = quantity.shape[0] if len(quantity.shape) == 1 else quantity.shape[1] - sc_given = 1 if len(quantity.shape) == 1 else quantity.shape[0] - raise ValueError('Quantity must be: a number, an array like (horizon, ) or (nb_scn, 1) or (nb_scn, horizon). ' + horizon_given = array.shape[0] if len(array.shape) == 1 else array.shape[1] + sc_given = 1 if len(array.shape) == 1 else array.shape[0] + raise ValueError('Array must be: a number, an array like (horizon, ) or (nb_scn, 1) or (nb_scn, horizon). ' 'In your case horizon specified is %d and actual is %d. ' 'And nb_scn specified %d is whereas actual is %d' % (self.horizon, horizon_given, self.nb_scn, sc_given)) @@ -206,9 +393,9 @@ class NetworkFluentAPISelector: """ Network level of Fluent API Selector. """ - def __init__(self, study): + def __init__(self, study, selector): self.study = study - self.selector = dict() + self.selector = selector def node(self, name): """ @@ -218,7 +405,7 @@ def node(self, name): :return: NodeFluentAPISelector initialized """ self.selector['node'] = name - self.study.add_node(name) + self.study.add_node(network=self.selector['network'], node=name) return NodeFluentAPISelector(self.study, self.selector) def link(self, src: str, dest: str, cost: int, quantity: Union[List, np.ndarray, float]): @@ -232,8 +419,33 @@ def link(self, src: str, dest: str, cost: int, quantity: Union[List, np.ndarray, :return: NetworkAPISelector with new link. """ - self.study.add_link(src=src, dest=dest, cost=cost, quantity=quantity) - return NetworkFluentAPISelector(self.study) + self.study.add_link(network=self.selector['network'], src=src, dest=dest, cost=cost, quantity=quantity) + return NetworkFluentAPISelector(self.study, self.selector) + + def network(self, name='default'): + """ + Go to network level. 
+ + :param name: network level, 'default' as default name + :return: NetworkAPISelector with selector set to 'default' + """ + self.study.add_network(name) + return NetworkFluentAPISelector(selector={'network': name}, study=self.study) + + def converter(self, name: str, to_network: str, to_node: str, max: float, cost: float = 0): + """ + Add a converter element. + + :param name: converter name + :param to_network: converter output network + :param to_node: converter output node on network + :param max: maximum quantity from converter + :param cost: cost for each quantity produce by converter + :return: + """ + self.study._add_converter(name=name) + self.study._set_converter_dest(name=name, network=to_network, node=to_node, cost=cost, max=max) + return NetworkFluentAPISelector(selector={}, study=self.study) def build(self): """ @@ -261,7 +473,8 @@ def consumption(self, name: str, cost: int, quantity: Union[List, np.ndarray, fl :param quantity: consumption to sustain :return: NodeFluentAPISelector with new consumption """ - self.study._add_consumption(node=self.selector['node'], cons=Consumption(name=name, cost=cost, quantity=quantity)) + self.study._add_consumption(network=self.selector['network'], node=self.selector['node'], + cons=Consumption(name=name, cost=cost, quantity=quantity)) return self def production(self, name: str, cost: int, quantity: Union[List, np.ndarray, float]): @@ -273,7 +486,25 @@ def production(self, name: str, cost: int, quantity: Union[List, np.ndarray, flo :param quantity: available capacities :return: NodeFluentAPISelector with new production """ - self.study._add_production(node=self.selector['node'], prod=Production(name=name, cost=cost, quantity=quantity)) + self.study._add_production(network=self.selector['network'], node=self.selector['node'], + prod=Production(name=name, cost=cost, quantity=quantity)) + return self + + def storage(self, name, capacity: int, flow_in: float, flow_out: float, cost: float = 0, + init_capacity: int = 0, 
eff: int = 0.99): + """ + Create storage. + + :param capacity: maximum storage capacity (like of many quantity to use inside storage) + :param flow_in: max flow into storage during on time step + :param flow_out: max flow out storage during on time step + :param cost: unit cost of storage at each time-step. default 0 + :param init_capacity: initial capacity level. default 0 + :param eff: storage efficient (applied on input flow stored). default 0.99 + """ + self.study._add_storage(network=self.selector['network'], node=self.selector['node'], + store=Storage(name=name, capacity=capacity, flow_in=flow_in, flow_out=flow_out, + cost=cost, init_capacity=init_capacity, eff=eff)) return self def node(self, name): @@ -283,7 +514,7 @@ def node(self, name): :param name: new node level :return: NodeFluentAPISelector """ - return NetworkFluentAPISelector(self.study).node(name) + return NetworkFluentAPISelector(self.study, self.selector).node(name) def link(self, src: str, dest: str, cost: int, quantity: Union[List, np.ndarray, float]): """ @@ -296,7 +527,42 @@ def link(self, src: str, dest: str, cost: int, quantity: Union[List, np.ndarray, :return: NetworkAPISelector with new link. """ - return NetworkFluentAPISelector(self.study).link(src=src, dest=dest, cost=cost, quantity=quantity) + return NetworkFluentAPISelector(self.study, self.selector).link(src=src, dest=dest, cost=cost, quantity=quantity) + + def network(self, name='default'): + """ + Go to network level. + + :param name: network level, 'default' as default name + :return: NetworkAPISelector with selector set to 'default' + """ + return NetworkFluentAPISelector(selector={}, study=self.study).network(name) + + def converter(self, name: str, to_network: str, to_node: str, max: float, cost: float = 0): + """ + Add a converter element. 
+ + :param name: converter name + :param to_network: converter output network + :param to_node: converter output node on network + :param max: maximum quantity from converter + :param cost: cost for each quantity produce by converter + :return: + """ + return NetworkFluentAPISelector(selector={}, study=self.study)\ + .converter(name=name, to_network=to_network, to_node=to_node, max=max, cost=cost) + + def to_converter(self, name: str, ratio: float = 1): + """ + Add an ouptput to converter. + + :param name: converter name + :param ratio: ratio for output + :return: + """ + self.study._add_converter(name=name) + self.study._add_converter_src(name=name, network=self.selector['network'], node=self.selector['node'], ratio=ratio) + return self def build(self): """ diff --git a/hadar/optimizer/lp/domain.py b/hadar/optimizer/lp/domain.py index 432adba..996539d 100644 --- a/hadar/optimizer/lp/domain.py +++ b/hadar/optimizer/lp/domain.py @@ -4,12 +4,11 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. -import numpy as np -from typing import List, Union +from typing import List, Union, Dict, Tuple from ortools.linear_solver.pywraplp import Variable -from hadar.optimizer.input import DTO +from hadar.optimizer.input import DTO, Study class SerializableVariable(DTO): @@ -46,6 +45,7 @@ def __reduce__(self): """ return self.__class__, (self.quantity, SerializableVariable(self.variable), self.cost, self.name) + class LPProduction(DTO): """ Production element for linear programming. 
@@ -73,6 +73,49 @@ def __reduce__(self): return self.__class__, (self.quantity, SerializableVariable(self.variable), self.cost, self.name) +class LPStorage(DTO): + """ + Storage element + """ + def __init__(self, name, capacity: int, var_capacity: Union[Variable, SerializableVariable], + flow_in: float, var_flow_in: Union[Variable, SerializableVariable], + flow_out: float, var_flow_out: Union[Variable, SerializableVariable], + cost: float = 0, init_capacity: int = 0, eff: float = .99): + """ + Create storage. + + :param capacity: maximum storage capacity (like of many quantity to use inside storage) + :param var_capacity: solver variable for capacity + :param flow_in: max flow into storage during on time step + :param var_flow_in: solver variable for var_flow_in + :param flow_out: max flow out storage during on time step + :param var_flow_out: solver variable for var_flow_out + :param cost: unit cost of storage at each time-step. default 0 + :param init_capacity: initial capacity level + :param eff: storage efficient. (applied on input flow stored) + """ + self.name = name + self.capacity = capacity + self.var_capacity = var_capacity + self.flow_in = flow_in + self.var_flow_in = var_flow_in + self.flow_out = flow_out + self.var_flow_out = var_flow_out + self.cost = cost + self.init_capacity = init_capacity + self.eff = eff + + def __reduce__(self): + """ + Help pickle to serialize object, specially variable object + :return: (constructor, values...) 
+ """ + return self.__class__, (self.name, self.capacity, SerializableVariable(self.var_capacity), + self.flow_in, SerializableVariable(self.var_flow_in), + self.flow_out, SerializableVariable(self.var_flow_out), + self.cost, self.init_capacity, self.eff) + + class LPLink(DTO): """ Link element for linear programming @@ -101,11 +144,52 @@ def __reduce__(self): return self.__class__, (self.src, self.dest, self.quantity, SerializableVariable(self.variable), self.cost) +class LPConverter(DTO): + """ + Converter element for linear programming + """ + def __init__(self, name: str, src_ratios: Dict[Tuple[str, str], float], + var_flow_src: Dict[Tuple[str, str], Union[Variable, SerializableVariable]], + dest_network: str, dest_node: str, + var_flow_dest: Union[Variable, SerializableVariable], + cost: float, max: float,): + """ + Create converter. + + :param name: converter name + + :param src_ratios: ration conversion for each sources. data={(network, node): ratio} + :param var_flow_src: ortools variables represents quantity from sources + :param dest_network: destination network + :param dest_node: destination node + :param var_flow_dest: ortools variables represents quantity to destination + :param cost: cost applied on quantity through converter + :param max: max output flow + """ + self.name = name + self.src_ratios = src_ratios + self.var_flow_src = var_flow_src + self.dest_network = dest_network + self.dest_node = dest_node + self.var_flow_dest = var_flow_dest + self.cost = cost + self.max = max + + def __reduce__(self): + """ + Help pickle to serialize object, specially variable object + :return: (constructor, values...) 
+ """ + return self.__class__, (self.name, self.src_ratios, {src: SerializableVariable(var) for src, var in self.var_flow_src.items()}, + self.dest_network, self.dest_node, SerializableVariable(self.var_flow_dest), self.cost, self.max) + + class LPNode(DTO): """ Node element for linear programming """ - def __init__(self, consumptions: List[LPConsumption], productions: List[LPProduction], links: List[LPLink]): + def __init__(self, consumptions: List[LPConsumption], productions: List[LPProduction], + storages: List[LPStorage], links: List[LPLink]): """ Instance node. @@ -115,4 +199,31 @@ def __init__(self, consumptions: List[LPConsumption], productions: List[LPProduc """ self.consumptions = consumptions self.productions = productions - self.links = links \ No newline at end of file + self.storages = storages + self.links = links + + +class LPNetwork(DTO): + """ + Network element for linear programming + """ + + def __init__(self, nodes: Dict[str, LPNode] = None): + """ + Instance network. + + :param study: Study to use to generate blank network + """ + self.nodes = nodes if nodes else dict() + + +class LPTimeStep(DTO): + def __init__(self, networks: Dict[str, LPNetwork], converters: Dict[str, LPConverter]): + self.networks = networks + self.converters = converters + + @staticmethod + def create_like_study(study: Study): + networks = {name: LPNetwork() for name in study.networks} + converters = dict() + return LPTimeStep(networks=networks, converters=converters) diff --git a/hadar/optimizer/lp/mapper.py b/hadar/optimizer/lp/mapper.py index 746ada2..94bdb8e 100644 --- a/hadar/optimizer/lp/mapper.py +++ b/hadar/optimizer/lp/mapper.py @@ -4,12 +4,12 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. 
- +import numpy as np from ortools.linear_solver.pywraplp import Solver -from hadar.optimizer.input import Study -from hadar.optimizer.lp.domain import LPLink, LPConsumption, LPNode, LPProduction -from hadar.optimizer.output import OutputNode, Result +from hadar.optimizer.input import Study, InputNetwork +from hadar.optimizer.lp.domain import LPLink, LPConsumption, LPNode, LPProduction, LPStorage, LPConverter +from hadar.optimizer.output import OutputNode, Result, OutputNetwork, OutputConverter class InputMapper: @@ -27,28 +27,57 @@ def __init__(self, solver: Solver, study: Study): self.solver = solver self.study = study - def get_var(self, name: str, t: int, scn: int) -> LPNode: + def get_node_var(self, network: str, node: str, t: int, scn: int) -> LPNode: """ Map InputNode to LPNode. - :param name: node name - :param t: timestamp + :param network: network name + :param node: node name + :param t: time step :param scn: scenario index :return: LPNode according to node name at t in study """ - consumptions = [LPConsumption(name=c.name, cost=float(c.cost), quantity=c.quantity[scn, t], - variable=self.solver.NumVar(0, float(c.quantity[scn, t]), name='lol {} on {} at t={} for scn={}'.format(c.name, name, t, scn))) - for c in self.study.nodes[name].consumptions] + suffix = 'inside network=%s on node=%s at t=%d for scn=%d' % (network, node, t, scn) + in_node = self.study.networks[network].nodes[node] + + consumptions = [LPConsumption(name=c.name, cost=c.cost[scn, t], quantity=c.quantity[scn, t], + variable=self.solver.NumVar(0, float(c.quantity[scn, t]), name='lol=%s %s' % (c.name, suffix))) + for c in in_node.consumptions] + + productions = [LPProduction(name=p.name, cost=p.cost[scn, t], quantity=p.quantity[scn, t], + variable=self.solver.NumVar(0, float(p.quantity[scn, t]), 'prod=%s %s' % (p.name, suffix))) + for p in in_node.productions] + + storages = [LPStorage(name=s.name, capacity=s.capacity, flow_in=s.flow_in, flow_out=s.flow_out, eff=s.eff, + 
init_capacity=s.init_capacity, cost=s.cost, + var_capacity=self.solver.NumVar(0, float(s.capacity), 'storage_capacity=%s %s' % (s.name, suffix)), + var_flow_in=self.solver.NumVar(0, float(s.flow_in), 'storage_flow_in=%s %s' % (s.name, suffix)), + var_flow_out=self.solver.NumVar(0, float(s.flow_out), 'storage_flow_out=%s %s' % (s.name, suffix))) + for s in in_node.storages] + + links = [LPLink(dest=l.dest, cost=l.cost[scn, t], src=node, quantity=l.quantity[scn, t], + variable=self.solver.NumVar(0, float(l.quantity[scn, t]), 'link=%s %s' % (l.dest, suffix))) + for l in in_node.links] + + return LPNode(consumptions=consumptions, productions=productions, links=links, storages=storages) - productions = [LPProduction(name=p.name, cost=float(p.cost), quantity=p.quantity[scn, t], - variable=self.solver.NumVar(0, float(p.quantity[scn, t]), 'prod {} on {} at t={} for scn={}'.format(p.name, name, t, scn))) - for p in self.study.nodes[name].productions] + def get_conv_var(self, name: str, t: int, scn: int) -> LPConverter: + """ + Map Converter to LPConverter. 
- links = [LPLink(dest=l.dest, cost=float(l.cost), src=name, quantity=l.quantity[scn, t], - variable=self.solver.NumVar(0, float(l.quantity[scn, t]), 'link on {} to {} at t={} for scn={}'.format(name, l.dest, t, scn))) - for l in self.study.nodes[name].links] + :param name: converter name + :param t: time step + :param scn: scenario index + :return: LPConverter + """ + suffix = 'at t=%d for scn=%d' % (t, scn) + v = self.study.converters[name] - return LPNode(consumptions=consumptions, productions=productions, links=links) + return LPConverter(name=v.name, src_ratios=v.src_ratios, dest_network=v.dest_network, dest_node=v.dest_node, + cost=v.cost, max=v.max, + var_flow_src={src: self.solver.NumVar(0, float(v.max / r), 'flow_src %s %s %s' % (v.name, ':'.join(src), suffix)) + for src, r in v.src_ratios.items()}, + var_flow_dest=self.solver.NumVar(0, float(v.max), 'flow_dest %s %s' % (v.name, suffix))) class OutputMapper: @@ -62,26 +91,44 @@ def __init__(self, study: Study): :param solver: ortools solver to use to fetch variable value :param study: input study to reproduce structure """ - self.nodes = {name: OutputNode.build_like_input(input) for name, input in study.nodes.items()} + zeros = np.zeros((study.nb_scn, study.horizon)) + def build_nodes(network: InputNetwork): + return {name: OutputNode.build_like_input(input, fill=zeros) for name, input in network.nodes.items()} - def set_var(self, name: str, t: int, scn: int, vars: LPNode): + self.networks = {name: OutputNetwork(nodes=build_nodes(network)) for name, network in study.networks.items()} + self.converters = {name: OutputConverter(name=name, flow_src={src: zeros for src in conv.src_ratios}, flow_dest=zeros) + for name, conv in study.converters.items()} + + def set_node_var(self, network: str, node: str, t: int, scn: int, vars: LPNode): """ Map linear programming node to global node (set inside intern attribute). 
- :param name: node name + :param network: network name + :param node: node name :param t: timestamp index :param scn: scenario index :param vars: linear programming node with ortools variables inside :return: None (use get_result) """ + out_node = self.networks[network].nodes[node] for i in range(len(vars.consumptions)): - self.nodes[name].consumptions[i].quantity[scn, t] = vars.consumptions[i].quantity - vars.consumptions[i].variable.solution_value() + out_node.consumptions[i].quantity[scn, t] = vars.consumptions[i].quantity - vars.consumptions[i].variable.solution_value() for i in range(len(vars.productions)): - self.nodes[name].productions[i].quantity[scn, t] = vars.productions[i].variable.solution_value() + out_node.productions[i].quantity[scn, t] = vars.productions[i].variable.solution_value() + + for i in range(len(vars.storages)): + out_node.storages[i].capacity[scn, t] = vars.storages[i].var_capacity.solution_value() + out_node.storages[i].flow_in[scn, t] = vars.storages[i].var_flow_in.solution_value() + out_node.storages[i].flow_out[scn, t] = vars.storages[i].var_flow_out.solution_value() for i in range(len(vars.links)): - self.nodes[name].links[i].quantity[scn, t] = vars.links[i].variable.solution_value() + self.networks[network].nodes[node].links[i].quantity[scn, t] = vars.links[i].variable.solution_value() + + def set_converter_var(self, name: str, t: int, scn: int, vars: LPConverter): + for src, var in vars.var_flow_src.items(): + self.converters[name].flow_src[src][scn, t] = var.solution_value() + self.converters[name].flow_dest[scn, t] = vars.var_flow_dest.solution_value() def get_result(self) -> Result: """ @@ -89,4 +136,4 @@ def get_result(self) -> Result: :return: final result after map all nodes """ - return Result(nodes=self.nodes) + return Result(networks=self.networks, converters=self.converters) diff --git a/hadar/optimizer/lp/optimizer.py b/hadar/optimizer/lp/optimizer.py index db80d0c..be0aa9d 100644 --- a/hadar/optimizer/lp/optimizer.py 
+++ b/hadar/optimizer/lp/optimizer.py @@ -6,17 +6,14 @@ # This file is part of hadar-simulator, a python adequacy library for everyone. import logging -import pickle -from functools import reduce - -import numpy as np import multiprocessing -from typing import List, Dict +import pickle +from typing import List -from ortools.linear_solver.pywraplp import Solver, Variable +from ortools.linear_solver.pywraplp import Solver, Constraint from hadar.optimizer.input import Study -from hadar.optimizer.lp.domain import LPNode, LPProduction, LPConsumption, LPLink +from hadar.optimizer.lp.domain import LPNode, LPProduction, LPConsumption, LPLink, LPStorage, LPTimeStep, LPConverter from hadar.optimizer.lp.mapper import InputMapper, OutputMapper from hadar.optimizer.output import Result @@ -47,6 +44,7 @@ def add_node(self, node: LPNode): """ self._add_consumption(node.consumptions) self._add_productions(node.productions) + self._add_storages(node.storages) self._add_links(node.links) def _add_consumption(self, consumptions: List[LPConsumption]): @@ -71,6 +69,16 @@ def _add_productions(self, prods: List[LPProduction]): self.objective.SetCoefficient(prod.variable, prod.cost) self.logger.debug('Add production %s into objective', prod.name) + def _add_storages(self, stors: List[LPStorage]): + """ + Add storage cost. Cost of store for each time-step + :param stors: list of storages + :return: + """ + for stor in stors: + self.objective.SetCoefficient(stor.var_capacity, stor.cost) + self.logger.debug('Add storage %s into objective', stor.name) + def _add_links(self, links: List[LPLink]): """ Add link cost. That mean cost to use a link capacity. @@ -82,6 +90,16 @@ def _add_links(self, links: List[LPLink]): self.objective.SetCoefficient(link.variable, link.cost) self.logger.debug('Add link %s->%s to objective', link.src, link.dest) + def add_converter(self, conv: LPConverter): + """ + Add converter. Apply cost on output of converter. 
+ + :param conv: converter to cost + :return: + """ + self.objective.SetCoefficient(conv.var_flow_dest, conv.cost) + self.logger.debug('Add converter %s to objective' % conv.name) + def build(self): pass # Currently nothing are need at the end. But we keep builder pattern syntax @@ -102,64 +120,97 @@ def __init__(self, solver: Solver): self.solver = solver self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__) - def add_node(self, name: str, node: LPNode, t: int): + def add_node(self, name_network: str, name_node: str, node: LPNode, t: int): """ Add flow constraint for a specific node. - :param name: node name. Used to differentiate each equation + :param name_network: network name. Used to differentiate each equation + :param name_node: node name. Used to differentiate each equation :param node: node to map constraint :return: """ # Set forced consumption - load = sum([c.quantity for c in node.consumptions])*1.0 - self.constraints[(t, name)] = self.solver.Constraint(load, load) + load = sum([c.quantity for c in node.consumptions]) * 1.0 + self.constraints[(t, name_network, name_node)] = self.solver.Constraint(load, load) - self._add_consumptions(name, t, node.consumptions) - self._add_productions(name, t, node.productions) - self._add_links(name, t, node.links) + self._add_consumptions(name_network, name_node, t, node.consumptions) + self._add_productions(name_network, name_node, t, node.productions) + self._add_storages(name_network, name_node, t, node.storages) + self._add_links(name_network, name_node, t, node.links) - def _add_consumptions(self, name: str, t: int, consumptions: List[LPConsumption]): + def _add_consumptions(self, name_network: str, name_node: str, t: int, consumptions: List[LPConsumption]): """ Add consumption flow. That mean loss of consumption is set a production to match equation in case there are not enough production. 
- :param name: node's name + :param name_network: network's name + :param name_node: node's name :param t: timestamp :param consumptions: consumptions with loss as variable :return: """ for cons in consumptions: - self.constraints[(t, name)].SetCoefficient(cons.variable, 1) - self.logger.debug('Add lol %s for %s into adequacy constraint', cons.name, name) + self.constraints[(t, name_network, name_node)].SetCoefficient(cons.variable, 1) + self.logger.debug('Add lol %s for %s inside %s into adequacy constraint', cons.name, name_node, name_network) - def _add_productions(self, name: str, t: int, productions: List[LPProduction]): + def _add_productions(self, name_network: str, name_node: str, t: int, productions: List[LPProduction]): """ Add production flow. That mean production use is like a production. - :param name: node's name + :param name_network: network's name + :param name_node: node's name :param t: timestamp :param productions: productions with production used as variable :return: """ for prod in productions: - self.constraints[(t, name)].SetCoefficient(prod.variable, 1) - self.logger.debug('Add prod %s for %s into adequacy constraint', prod.name, name) + self.constraints[(t, name_network, name_node)].SetCoefficient(prod.variable, 1) + self.logger.debug('Add prod %s for %s inside %s into adequacy constraint', prod.name, name_node, name_network) - def _add_links(self, name: str, t: int, links: List[LPLink]): + def _add_storages(self, name_network: str, name_node: str, t: int, storages: List[LPStorage]): + """ + Add storage flow. Flow in is like a consumption. Flow out is like a production. 
+ + :param name_network: network's name + :param name_node: node's name + :param t: timestamp + :param productions: storage with flow used as variable + :return: + """ + for stor in storages: + self.constraints[(t, name_network, name_node)].SetCoefficient(stor.var_flow_in, -1) + self.constraints[(t, name_network, name_node)].SetCoefficient(stor.var_flow_out, 1) + self.logger.debug('Add storage %s for %s inside %s into adequacy constraint', stor.name, name_node, name_network) + + def _add_links(self, name_network: str, name_node: str, t: int, links: List[LPLink]): """ Add links. That mean the link export is like a consumption. After all node added. The same export, become also an import for destination node. Therefore link has to be set like production for destination node. - :param name: node's name + :param name_network: network's name + :param name_node: node's name :param t: timestamp :param links: link with export quantity as variable :return: """ for link in links: - self.constraints[(t, link.src)].SetCoefficient(link.variable, -1) # Export from src - self.importations[(t, link.src, link.dest)] = link.variable # Import to dest - self.logger.debug('Add link %s for %s into adequacy constraint', link.dest, name) + self.constraints[(t, name_network, link.src)].SetCoefficient(link.variable, -1) # Export from src + self.importations[(t, name_network, link.src, link.dest)] = link.variable # Import to dest + self.logger.debug('Add link %s for %s inside %s into adequacy constraint', link.dest, name_node, name_network) + + def add_converter(self, conv: LPConverter, t: int): + """ + Add converter element in equation. 
Sources are like consumptions, destination like production + + :param conv: converter element to add + :param t: time index to use + :return: + """ + self.constraints[(t, conv.dest_network, conv.dest_node)].SetCoefficient(conv.var_flow_dest, 1) + for (network, node), var in conv.var_flow_src.items(): + self.constraints[(t, network, node)].SetCoefficient(var, -1) + self.logger.debug('Add converter %s' % conv.name) def build(self): """ @@ -168,8 +219,62 @@ def build(self): :return: """ # Apply import link in adequacy - for (t, src, dest), var in self.importations.items(): - self.constraints[(t, dest)].SetCoefficient(var, 1) + for (t, net, src, dest), var in self.importations.items(): + self.constraints[(t, net, dest)].SetCoefficient(var, 1) + + +class StorageBuilder: + """ + Build storage constraints + """ + + def __init__(self, solver: Solver): + self.capacities = dict() + self.solver = solver + self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__) + + def add_node(self, name_network: str, name_node: str, node: LPNode, t: int) -> Constraint: + for stor in node.storages: + self.capacities[(t, name_network, name_node, stor.name)] = stor.var_capacity + if t == 0: + const = self.solver.Constraint(stor.init_capacity, stor.init_capacity) + const.SetCoefficient(stor.var_flow_in, -stor.eff) + const.SetCoefficient(stor.var_flow_out, 1) + const.SetCoefficient(stor.var_capacity, 1) + return const + else: + const = self.solver.Constraint(0, 0) + const.SetCoefficient(stor.var_flow_in, -stor.eff) + const.SetCoefficient(stor.var_flow_out, 1) + const.SetCoefficient(self.capacities[(t-1, name_network, name_node, stor.name)], -1) + const.SetCoefficient(stor.var_capacity, 1) + return const + + def build(self): + pass # Currently nothing are need at the end. But we keep builder pattern syntax + + +class ConverterMixBuilder: + """ + Build equation to determine ratio mix between sources converter. 
+ """ + def __init__(self, solver: Solver): + self.solver = solver + self.logger = logging.getLogger(__name__ + '.' + self.__class__.__name__) + + def add_converter(self, conv: LPConverter): + return [ConverterMixBuilder._build_constraint(self.solver, r, conv.var_flow_dest, conv.var_flow_src[src]) + for src, r in conv.src_ratios.items()] + + @staticmethod + def _build_constraint(solver, r, var_dest, var_src): + const = solver.Constraint(0, 0) + const.SetCoefficient(var_src, r) + const.SetCoefficient(var_dest, -1) + return const + + def build(self): + pass # Currently nothing are need at the end. But we keep builder pattern syntax def _solve_batch(params) -> bytes: @@ -186,23 +291,39 @@ def _solve_batch(params) -> bytes: objective = ObjectiveBuilder(solver=solver) adequacy = AdequacyBuilder(solver=solver) + storage = StorageBuilder(solver=solver) + mix = ConverterMixBuilder(solver=solver) in_mapper = InputMapper(solver=solver, study=study) else: # Test purpose only - study, i_scn, solver, objective, adequacy, in_mapper = params + study, i_scn, solver, objective, adequacy, storage, mix, in_mapper = params - variables = [dict() for _ in range(study.horizon)] + variables = [LPTimeStep.create_like_study(study) for _ in range(study.horizon)] # Build equation for t in range(0, study.horizon): - for name, node in study.nodes.items(): - node = in_mapper.get_var(name=name, t=t, scn=i_scn) - variables[t][name] = node - adequacy.add_node(name=name, node=node, t=t) - objective.add_node(node=node) + + # Build node constraints + for name_network, network in study.networks.items(): + for name_node, node in network.nodes.items(): + node = in_mapper.get_node_var(network=name_network, node=name_node, t=t, scn=i_scn) + variables[t].networks[name_network].nodes[name_node] = node + adequacy.add_node(name_network=name_network, name_node=name_node, node=node, t=t) + storage.add_node(name_network=name_network, name_node=name_node, node=node, t=t) + objective.add_node(node=node) + + # Build 
converter constraints + for name in study.converters: + conv = in_mapper.get_conv_var(name=name, t=t, scn=i_scn) + variables[t].converters[name] = conv + adequacy.add_converter(conv=conv, t=t) + mix.add_converter(conv=conv) + objective.add_converter(conv=conv) objective.build() adequacy .build() + storage.build() + mix.build() logger.info('Problem build. Start solver') solver.EnableOutput() @@ -225,7 +346,7 @@ def solve_lp(study: Study, out_mapper=None) -> Result: :param out_mapper: use only for test purpose to inject mock. Keep None as default. :return: Result object with optimal solution """ - out_mapper = OutputMapper(study) if out_mapper is None else out_mapper + out_mapper = out_mapper or OutputMapper(study) pool = multiprocessing.Pool() byte = pool.map(_solve_batch, ((study, i_scn) for i_scn in range(study.nb_scn))) @@ -233,7 +354,13 @@ def solve_lp(study: Study, out_mapper=None) -> Result: for scn in range(0, study.nb_scn): for t in range(0, study.horizon): - for name in study.nodes.keys(): - out_mapper.set_var(name=name, t=t, scn=scn, vars=variables[scn][t][name]) + # Set node elements + for name_network, network in study.networks.items(): + for name_node in network.nodes.keys(): + out_mapper.set_node_var(network=name_network, node=name_node, t=t, scn=scn, + vars=variables[scn][t].networks[name_network].nodes[name_node]) + # Set converters + for name_conv in study.converters: + out_mapper.set_converter_var(name=name_conv, t=t, scn=scn, vars=variables[scn][t].converters[name_conv]) return out_mapper.get_result() diff --git a/hadar/optimizer/optimizer.py b/hadar/optimizer/optimizer.py index 0893fba..85ebe59 100644 --- a/hadar/optimizer/optimizer.py +++ b/hadar/optimizer/optimizer.py @@ -8,11 +8,10 @@ from abc import ABC, abstractmethod from hadar.optimizer.input import Study -from hadar.optimizer.output import Result from hadar.optimizer.lp.optimizer import solve_lp +from hadar.optimizer.output import Result from hadar.optimizer.remote.optimizer import 
solve_remote - __all__ = ['LPOptimizer', 'RemoteOptimizer'] diff --git a/hadar/optimizer/output.py b/hadar/optimizer/output.py index 9d825c0..3a58174 100644 --- a/hadar/optimizer/output.py +++ b/hadar/optimizer/output.py @@ -4,49 +4,22 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. +from copy import deepcopy +from typing import Union, List, Dict, Tuple import numpy as np -from typing import Union, List, Dict +from hadar.optimizer.input import InputNode, JSON -from hadar.optimizer.input import InputNode +__all__ = ['OutputProduction', 'OutputNode', 'OutputStorage', 'OutputLink', 'OutputConsumption', 'OutputNetwork', + 'OutputConverter', 'Result'] -__all__ = ['OutputProduction', 'OutputNode', 'OutputLink', 'OutputConsumption', 'Result'] - - -class DTO: - """ - Implement basic method for DTO objects - """ - - def __hash__(self): - return hash(tuple(sorted(self.__dict__.items()))) - - def __eq__(self, other): - if not isinstance(other, type(self)): - return False - for name, att in self.__dict__.items(): - if isinstance(att, (np.ndarray, np.generic)): - if not np.array_equal(self.__dict__[name], other.__dict__[name]): - return False - elif self.__dict__[name] != other.__dict__[name]: - return False - return True - - def __str__(self): - return "{}({})".format(type(self).__name__, - ", ".join(["{}={}".format(k, str(self.__dict__[k])) for k in sorted(self.__dict__)])) - - def __repr__(self): - return self.__str__() - - -class OutputConsumption(DTO): +class OutputConsumption(JSON): """ Consumption element """ - def __init__(self, quantity: Union[np.ndarray, list], cost: int = 0, name: str = ''): + def __init__(self, quantity: Union[np.ndarray, list], cost: Union[np.ndarray, list], name: str = ''): """ Create instance. 
@@ -54,16 +27,21 @@ def __init__(self, quantity: Union[np.ndarray, list], cost: int = 0, name: str = :param cost: cost of unavailability :param name: consumption name (unique in a node) """ - self.cost = cost + self.cost = np.array(cost) self.quantity = np.array(quantity) self.name = name -class OutputProduction(DTO): + @staticmethod + def from_json(dict): + return OutputConsumption(**dict) + + +class OutputProduction(JSON): """ Production element """ - def __init__(self, quantity: Union[np.ndarray, list], cost: int = 0, name: str = 'in'): + def __init__(self, quantity: Union[np.ndarray, list], cost: Union[np.ndarray, list], name: str = 'in'): """ Create instance. @@ -72,15 +50,43 @@ def __init__(self, quantity: Union[np.ndarray, list], cost: int = 0, name: str = :param name: production name (unique in a node) """ self.name = name - self.cost = cost + self.cost = np.array(cost) self.quantity = np.array(quantity) + @staticmethod + def from_json(dict): + return OutputProduction(**dict) + + +class OutputStorage(JSON): + """ + Storage element + """ + def __init__(self, name: str, capacity: Union[np.ndarray, list], + flow_in: Union[np.ndarray, list], flow_out: Union[np.ndarray, list]): + """ + Create instance. + + :param name: storage name + :param capacity: final capacity + :param flow_in: final input flow + :param flow_out: final output flow + """ + self.name = name + self.capacity = np.array(capacity) + self.flow_in = np.array(flow_in) + self.flow_out = np.array(flow_out) + + @staticmethod + def from_json(dict): + return OutputStorage(**dict) -class OutputLink(DTO): + +class OutputLink(JSON): """ Link element """ - def __init__(self, dest: str, quantity: Union[np.ndarray, list], cost: int = 0): + def __init__(self, dest: str, quantity: Union[np.ndarray, list], cost: Union[np.ndarray, list]): """ Create instance. 
@@ -90,58 +96,131 @@ def __init__(self, dest: str, quantity: Union[np.ndarray, list], cost: int = 0): """ self.dest = dest self.quantity = np.array(quantity) - self.cost = cost + self.cost = np.array(cost) + + @staticmethod + def from_json(dict): + return OutputLink(**dict) -class OutputNode(DTO): +class OutputConverter(JSON): + """ + Converter element + """ + def __init__(self, name: str, flow_src: Dict[Tuple[str, str], Union[np.ndarray, List]], flow_dest: Union[np.ndarray, List]): + """ + Create instance. + + :param name: converter name + :param flow_src: flow from sources + :param flow_dest: flow to destination + """ + self.name = name + self.flow_src = {src: np.array(qt) for src, qt in flow_src.items()} + self.flow_dest = np.array(flow_dest) + + def to_json(self) -> dict: + dict = deepcopy(self.__dict__) + # flow_src has a tuple of two string as key. These forbidden by JSON. + # Therefore when serialized we join these two strings with '::' to create on string as key + # Ex: ('elec', 'a') --> 'elec::a' + dict['flow_src'] = {'::'.join(k): v.tolist() for k, v in self.flow_src.items()} + dict['flow_dest'] = self.flow_dest.tolist() + return dict + + @staticmethod + def from_json(dict: dict): + # When deserialize, we need to split key string of src_network. + # JSON doesn't accept tuple as key, so two string was joined for serialization + # Ex: 'elec::a' -> ('elec', 'a') + dict['flow_src'] = {tuple(k.split('::')): v for k, v in dict['flow_src'].items()} + return OutputConverter(**dict) + + +class OutputNode(JSON): """ Node element """ def __init__(self, consumptions: List[OutputConsumption], productions: List[OutputProduction], + storages: List[OutputStorage], links: List[OutputLink]): """ Create Node. 
:param consumptions: consumptions list :param productions: productions list + :param storages: storages list :param links: link list """ self.consumptions = consumptions self.productions = productions + self.storages = storages self.links = links @staticmethod - def build_like_input(input: InputNode): + def build_like_input(input: InputNode, fill: np.ndarray): """ Use an input node to create an output node. Keep list elements fill quantity by zeros. :param input: InputNode to copy + :param fill: array to use to fill data :return: OutputNode like InputNode with all quantity at zero """ - output = OutputNode(consumptions=[], productions=[], links=[]) - - output.consumptions = [OutputConsumption(name=i.name, cost=i.cost, quantity=np.zeros_like(i.quantity)) + output = OutputNode(consumptions=[], productions=[], storages=[], links=[]) + output.consumptions = [OutputConsumption(name=i.name, cost=i.cost, quantity=fill) for i in input.consumptions] - output.productions = [OutputProduction(name=i.name, cost=i.cost, quantity=np.zeros_like(i.quantity)) + output.productions = [OutputProduction(name=i.name, cost=i.cost, quantity=fill) for i in input.productions] - output.links = [OutputLink(dest=i.dest, cost=i.cost, quantity=np.zeros_like(i.quantity)) + output.storages = [OutputStorage(name=i.name, capacity=fill, + flow_out=fill, flow_in=fill) + for i in input.storages] + output.links = [OutputLink(dest=i.dest, cost=i.cost, quantity=fill) for i in input.links] return output + @staticmethod + def from_json(dict): + dict['consumptions'] = [OutputConsumption.from_json(v) for v in dict['consumptions']] + dict['productions'] = [OutputProduction.from_json(v) for v in dict['productions']] + dict['storages'] = [OutputStorage.from_json(v) for v in dict['storages']] + dict['links'] = [OutputLink.from_json(v) for v in dict['links']] + return OutputNode(**dict) + -class Result(DTO): +class OutputNetwork(JSON): """ - Result of study + Network element """ + def __init__(self, nodes: 
Dict[str, OutputNode]): + """ + Create network + :param nodes: nodes belongs to network + """ + self.nodes = nodes + + @staticmethod + def from_json(dict): + dict['nodes'] = {k: OutputNode.from_json(v) for k, v in dict['nodes'].items()} + return OutputNetwork(**dict) + + +class Result(JSON): + """ + Result of study + """ + def __init__(self, networks: Dict[str, OutputNetwork], converters: Dict[str, OutputConverter]): """ Create result - :param nodes: list of nodes present in network + :param networks: list of networks present in study """ - self._nodes = nodes + self.networks = networks + self.converters = converters + - @property - def nodes(self): - return self._nodes + @staticmethod + def from_json(dict): + return Result(networks={k: OutputNetwork.from_json(v) for k, v in dict['networks'].items()}, + converters={k: OutputConverter.from_json(v) for k, v in dict['converters'].items()}) diff --git a/hadar/optimizer/remote/optimizer.py b/hadar/optimizer/remote/optimizer.py index ff0f1b2..e07ec06 100644 --- a/hadar/optimizer/remote/optimizer.py +++ b/hadar/optimizer/remote/optimizer.py @@ -4,9 +4,7 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. 
- import logging -import pickle import sys from time import sleep @@ -17,7 +15,6 @@ from hadar.optimizer.input import Study from hadar.optimizer.output import Result - logger = logging.getLogger(__name__) @@ -44,25 +41,12 @@ def solve_remote(study: Study, url: str, token: str = 'none') -> Result: :param token: authorized token (default server config doesn't use token) :return: result received from server """ - return _solve_remote_wrap(study, url, token, requests) - - -def _solve_remote_wrap(study: Study, url: str, token: str = 'none', rqt=None) -> Result: - """ - Same method than solve_remote but with with request library in parameter to inject mock during test. - - :param study: study to resolve - :param url: server url - :param token: authorized token (default server config doesn't use token) - :param rqt: requests library, main requests when use by user, mock when testing. - :return: result received from server - """ # Send study - resp = rqt.post(url='%s/study' % url, data=pickle.dumps(study), params={'token': token}) + resp = requests.post(url='%s/api/v1/study' % url, json=study.to_json(), params={'token': token}) check_code(resp.status_code) # Deserialize - resp = pickle.loads(resp.content) + resp = resp.json() id = resp['job'] Bar.check_tty = Spinner.check_tty = False @@ -71,9 +55,9 @@ def _solve_remote_wrap(study: Study, url: str, token: str = 'none', rqt=None) -> spinner = None while resp['status'] in ['QUEUED', 'COMPUTING']: - resp = rqt.get(url='%s/result/%s' % (url, id), params={'token': token}) + resp = requests.get(url='%s/api/v1/result/%s' % (url, id), params={'token': token}) check_code(resp.status_code) - resp = pickle.loads(resp.content) + resp = resp.json() if resp['status'] == 'QUEUED': bar.goto(resp['progress']) @@ -89,4 +73,4 @@ def _solve_remote_wrap(study: Study, url: str, token: str = 'none', rqt=None) -> if resp['status'] == 'ERROR': raise ServerError(resp['message']) - return resp['result'] + return Result.from_json(resp['result']) diff 
--git a/hadar/viewer/abc.py b/hadar/viewer/abc.py index 3af8df2..bead62a 100644 --- a/hadar/viewer/abc.py +++ b/hadar/viewer/abc.py @@ -4,11 +4,11 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. +from abc import ABC, abstractmethod from typing import List, Tuple, Dict import numpy as np import pandas as pd -from abc import ABC, abstractmethod from hadar.analyzer.result import ResultAnalyzer @@ -51,6 +51,17 @@ def gaussian(self, rac: np.ndarray, qt: np.ndarray, title: str): """ pass + @abstractmethod + def candles(self, open: np.ndarray, close: np.ndarray, title: str): + """ + Plot candle stick with open close + :param open: candle open data + :param close: candle close data + :param title: title to plot + :return: + """ + pass + @abstractmethod def stack(self, areas: List[Tuple[str, np.ndarray]], lines: List[Tuple[str, np.ndarray]], title: str): """ @@ -103,18 +114,20 @@ class ConsumptionFluentAPISelector(FluentAPISelector): """ Consumption level of fluent api. """ - def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, name: str, node: str, kind: str): + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, name: str, node: str, kind: str): FluentAPISelector.__init__(self, plotting, agg) self.name = name self.node = node self.kind = kind + self.network = network def timeline(self): """ Plot timeline graphics. 
:return: """ - cons = self.agg.network().node(self.node).consumption(self.name).scn().time()[self.kind] + cons = self.agg.network(self.network).node(self.node).consumption(self.name).scn().time()[self.kind] title = 'Consumptions %s for %s on node %s' % (self.kind, self.name, self.node) return self.plotting.timeline(cons, title) @@ -129,10 +142,10 @@ def monotone(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is not None: - y = self.agg.network().node(self.node).consumption(self.name).time(t).scn()[self.kind].values + y = self.agg.network(self.network).node(self.node).consumption(self.name).time(t).scn()[self.kind].values title = 'Monotone consumption of %s on node %s at t=%0d' % (self.name, self.node, t) elif scn is not None: - y = self.agg.network().node(self.node).consumption(self.name).scn(scn).time()[self.kind].values + y = self.agg.network(self.network).node(self.node).consumption(self.name).scn(scn).time()[self.kind].values title = 'Monotone consumption of %s on node %s at scn=%0d' % (self.name, self.node, scn) return self.plotting.monotone(y, title) @@ -148,12 +161,12 @@ def gaussian(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is None: - cons = self.agg.network().node(self.node).consumption(self.name).scn(scn).time()[self.kind].values - rac = self.agg.get_rac()[scn, :] + cons = self.agg.network(self.network).node(self.node).consumption(self.name).scn(scn).time()[self.kind].values + rac = self.agg.get_rac(network=self.network)[scn, :] title = 'Gaussian consumption of %s on node %s at scn=%0d' % (self.name, self.node, scn) elif scn is None: - cons = self.agg.network().node(self.node).consumption(self.name).time(t).scn()[self.kind].values - rac = self.agg.get_rac()[:, t] + cons = self.agg.network(self.network).node(self.node).consumption(self.name).time(t).scn()[self.kind].values + rac = self.agg.get_rac(network=self.network)[:, t] title = 'Gaussian consumption of %s on node %s at t=%0d' % 
(self.name, self.node, t) return self.plotting.gaussian(rac=rac, qt=cons, title=title) @@ -163,18 +176,20 @@ class ProductionFluentAPISelector(FluentAPISelector): """ Production level of fluent api """ - def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, name: str, node: str, kind: str): + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, name: str, node: str, kind: str): FluentAPISelector.__init__(self, plotting, agg) self.name = name self.node = node self.kind = kind + self.network = network def timeline(self): """ Plot timeline graphics. :return: """ - prod = self.agg.network().node(self.node).production(self.name).scn().time()[self.kind] + prod = self.agg.network(self.network).node(self.node).production(self.name).scn().time()[self.kind] title = 'Production %s for %s on node %s' % (self.kind, self.name, self.node) return self.plotting.timeline(prod, title) @@ -189,10 +204,10 @@ def monotone(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is not None: - y = self.agg.network().node(self.node).production(self.name).time(t).scn()[self.kind].values + y = self.agg.network(self.network).node(self.node).production(self.name).time(t).scn()[self.kind].values title = 'Monotone production of %s on node %s at t=%0d' % (self.name, self.node, t) elif scn is not None: - y = self.agg.network().node(self.node).production(self.name).scn(scn).time()[self.kind].values + y = self.agg.network(self.network).node(self.node).production(self.name).scn(scn).time()[self.kind].values title = 'Monotone production of %s on node %s at scn=%0d' % (self.name, self.node, scn) return self.plotting.monotone(y, title) @@ -208,33 +223,82 @@ def gaussian(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is None: - prod = self.agg.network().node(self.node).production(self.name).scn(scn).time()[self.kind].values - rac = self.agg.get_rac()[scn, :] + prod = 
self.agg.network(self.network).node(self.node).production(self.name).scn(scn).time()[self.kind].values + rac = self.agg.get_rac(network=self.network)[scn, :] title = 'Gaussian production of %s on node %s at scn=%0d' % (self.name, self.node, scn) elif scn is None: - prod = self.agg.network().node(self.node).production(self.name).time(t).scn()[self.kind].values - rac = self.agg.get_rac()[:, t] + prod = self.agg.network(self.network).node(self.node).production(self.name).time(t).scn()[self.kind].values + rac = self.agg.get_rac(network=self.network)[:, t] title = 'Gaussian production of %s on node %s at t=%0d' % (self.name, self.node, t) return self.plotting.gaussian(rac=rac, qt=prod, title=title) +class StorageFluentAPISelector(FluentAPISelector): + """ + Storage level of fluent API + """ + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, node: str, name: str): + FluentAPISelector.__init__(self, plotting, agg) + self.network = network + self.node = node + self.name = name + + def candles(self, scn: int = 0): + df = self.agg.network(self.network).node(self.node).storage(self.name).scn(scn).time() + df.sort_index(ascending=True, inplace=True) + + open = np.append(df['init_capacity'][0], (df['flow_in'] * df['eff'] - df['flow_out']).values) + open = open.cumsum() + close = open[1:] + open = open[:-1] + + title = 'Stockage capacity of %s on node %s for scn=%d' % (self.name, self.node, scn) + return self.plotting.candles(open=open, close=close, title=title) + + def monotone(self, t: int = None, scn: int = None): + """ + Plot monotone graphics. 
+ + :param t: focus on t index + :param scn: focus on scn index if t not given + :return: + """ + FluentAPISelector.not_both(t, scn) + + if t is not None: + df = self.agg.network(self.network).node(self.node).storage(self.name).time(t).scn() + df.sort_index(ascending=True, inplace=True) + y = (df['flow_in'] - df['flow_out']).values + title = 'Monotone storage of %s on node %s at t=%0d' % (self.name, self.node, t) + if scn is not None: + df = self.agg.network(self.network).node(self.node).storage(self.name).scn(scn).time() + df.sort_index(ascending=True, inplace=True) + y = (df['flow_in'] - df['flow_out']).values + title = 'Monotone storage of %s on node %s for scn=%0d' % (self.name, self.node, scn) + + return self.plotting.monotone(y, title) + + class LinkFluentAPISelector(FluentAPISelector): """ Link level of fluent api """ - def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, src: str, dest: str, kind: str): + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, src: str, dest: str, kind: str): FluentAPISelector.__init__(self, plotting, agg) self.src = src self.dest = dest self.kind = kind + self.network = network def timeline(self): """ Plot timeline graphics. 
:return: """ - links = self.agg.network().node(self.src).link(self.dest).scn().time()[self.kind] + links = self.agg.network(self.network).node(self.src).link(self.dest).scn().time()[self.kind] title = 'Link %s from %s to %s' % (self.kind, self.src, self.dest) return self.plotting.timeline(links, title) @@ -249,10 +313,10 @@ def monotone(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is not None: - y = self.agg.network().node(self.src).link(self.dest).time(t).scn()[self.kind].values + y = self.agg.network(self.network).node(self.src).link(self.dest).time(t).scn()[self.kind].values title = 'Monotone link from %s to %s at t=%0d' % (self.src, self.dest, t) elif scn is not None: - y = self.agg.network().node(self.src).link(self.dest).scn(scn).time()[self.kind].values + y = self.agg.network(self.network).node(self.src).link(self.dest).scn(scn).time()[self.kind].values title = 'Monotone link from %s to %s at scn=%0d' % (self.src, self.dest, scn) return self.plotting.monotone(y, title) @@ -268,24 +332,147 @@ def gaussian(self, t: int = None, scn: int = None): FluentAPISelector.not_both(t, scn) if t is None: - prod = self.agg.network().node(self.src).link(self.dest).scn(scn).time()[self.kind].values - rac = self.agg.get_rac()[scn, :] - title = 'Gaussian link from %s to %s at t=%0d' % (self.src, self.dest, scn) + prod = self.agg.network(self.network).node(self.src).link(self.dest).scn(scn).time()[self.kind].values + rac = self.agg.get_rac(network=self.network)[scn, :] + title = 'Gaussian link from %s to %s at scn=%0d' % (self.src, self.dest, scn) elif scn is None: - prod = self.agg.network().node(self.src).link(self.dest).time(t).scn()[self.kind].values - rac = self.agg.get_rac()[:, t] + prod = self.agg.network(self.network).node(self.src).link(self.dest).time(t).scn()[self.kind].values + rac = self.agg.get_rac(network=self.network)[:, t] title = 'Gaussian link from %s to %s at t=%0d' % (self.src, self.dest, t) return 
self.plotting.gaussian(rac=rac, qt=prod, title=title) +class SrcConverterFluentAPISelector(FluentAPISelector): + """ + Source converter level of fluent api + """ + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, node: str, name: str): + FluentAPISelector.__init__(self, plotting, agg) + self.node = node + self.name = name + self.network = network + + def timeline(self): + """ + Plot timeline graphics. + :return: + """ + links = self.agg.network(self.network).node(self.node).to_converter(self.name).scn().time()['flow'] + title = 'Timeline converter %s from node %s' % (self.name, self.node) + return self.plotting.timeline(links, title) + + def monotone(self, t: int = None, scn: int = None): + """ + Plot monotone graphics. + + :param t: focus on t index + :param scn: focus on scn index if t not given + :return: + """ + FluentAPISelector.not_both(t, scn) + + if t is not None: + y = self.agg.network(self.network).node(self.node).to_converter(self.name).time(t).scn()['flow'].values + title = 'Timeline converter %s from node %s at t=%0d' % (self.name, self.node, t) + elif scn is not None: + y = self.agg.network(self.network).node(self.node).to_converter(self.name).scn(scn).time()['flow'].values + title = 'Timeline converter %s from node %s at scn=%0d' % (self.name, self.node, scn) + + return self.plotting.monotone(y, title) + + def gaussian(self, t: int = None, scn: int = None): + """ + Plot gaussian graphics + + :param t: focus on t index + :param scn: focus on scn index if t not given + :return: + """ + FluentAPISelector.not_both(t, scn) + + if t is None: + prod = self.agg.network(self.network).node(self.node).to_converter(self.name).time(t).scn()['flow'].values + rac = self.agg.get_rac(network=self.network)[scn, :] + title = 'Gaussian converter %s from node %s at scn=%0d' % (self.name, self.node, scn) + elif scn is None: + prod = self.agg.network(self.network).node(self.node).to_converter(self.name).time(t).scn()['flow'].values + 
rac = self.agg.get_rac(network=self.network)[:, t] + title = 'Gaussian converter %s from node %s at t=%0d' % (self.name, self.node, t) + + return self.plotting.gaussian(rac=rac, qt=prod, title=title) + + +class DestConverterFluentAPISelector(FluentAPISelector): + """ + Source converter level of fluent api + """ + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, + network: str, node: str, name: str): + FluentAPISelector.__init__(self, plotting, agg) + self.node = node + self.name = name + self.network = network + + def timeline(self): + """ + Plot timeline graphics. + :return: + """ + links = self.agg.network(self.network).node(self.node).from_converter(self.name).scn().time()['flow'] + title = 'Timeline converter %s to node %s' % (self.name, self.node) + return self.plotting.timeline(links, title) + + def monotone(self, t: int = None, scn: int = None): + """ + Plot monotone graphics. + + :param t: focus on t index + :param scn: focus on scn index if t not given + :return: + """ + FluentAPISelector.not_both(t, scn) + + if t is not None: + y = self.agg.network(self.network).node(self.node).from_converter(self.name).time(t).scn()['flow'].values + title = 'Timeline converter %s to node %s at t=%0d' % (self.name, self.node, t) + elif scn is not None: + y = self.agg.network(self.network).node(self.node).from_converter(self.name).scn(scn).time()['flow'].values + title = 'Timeline converter %s to node %s at scn=%0d' % (self.name, self.node, scn) + + return self.plotting.monotone(y, title) + + def gaussian(self, t: int = None, scn: int = None): + """ + Plot gaussian graphics + + :param t: focus on t index + :param scn: focus on scn index if t not given + :return: + """ + FluentAPISelector.not_both(t, scn) + + if t is None: + prod = self.agg.network(self.network).node(self.node).from_converter(self.name).time(t).scn()['flow'].values + rac = self.agg.get_rac(network=self.network)[scn, :] + title = 'Gaussian converter %s to node %s at scn=%0d' % 
(self.name, self.node, scn) + elif scn is None: + prod = self.agg.network(self.network).node(self.node).from_converter(self.name).time(t).scn()['flow'].values + rac = self.agg.get_rac(network=self.network)[:, t] + title = 'Gaussian converter %s to node %s at t=%0d' % (self.name, self.node, t) + + return self.plotting.gaussian(rac=rac, qt=prod, title=title) + + class NodeFluentAPISelector(FluentAPISelector): """ Node level of fluent api """ - def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, node: str): + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, network: str, node: str): FluentAPISelector.__init__(self, plotting, agg) self.node = node + self.network = network def stack(self, scn: int = 0, prod_kind: str = 'used', cons_kind: str = 'asked'): """ @@ -297,17 +484,29 @@ def stack(self, scn: int = 0, prod_kind: str = 'used', cons_kind: str = 'asked') :param cons_kind: select which cons to stack : 'asked' or 'given' :return: plotly figure or jupyter widget to plot """ - c, p, b = self.agg.get_elements_inside(node=self.node) + c, p, s, b, ve, vi = self.agg.get_elements_inside(node=self.node, network=self.network) areas = [] # stack production with area if p > 0: - prod = self.agg.network().scn(scn).node(self.node).production().time().sort_values('cost', ascending=False) + prod = self.agg.network(self.network).scn(scn).node(self.node).production().time().sort_values('cost', ascending=False) for i, name in enumerate(prod.index.get_level_values('name').unique()): areas.append((name, prod.loc[name][prod_kind].sort_index().values)) + # add storage output flow + if s > 0: + stor = self.agg.network(self.network).scn(scn).node(self.node).storage().time().sort_values('cost', ascending=False) + for i, name in enumerate(stor.index.get_level_values('name').unique()): + areas.append((name, stor.loc[name]['flow_out'].sort_index().values)) + + # Add converter importation + if vi > 0: + conv = 
self.agg.network(self.network).scn(scn).node(self.node).from_converter().time() + for i, name in enumerate(conv.index.get_level_values('name').unique()): + areas.append((name, conv.loc[name, 'flow'].sort_index().values)) + # add import in production stack - balance = self.agg.get_balance(node=self.node)[scn] + balance = self.agg.get_balance(node=self.node, network=self.network)[scn] im = -np.clip(balance, None, 0) if not (im == 0).all(): areas.append(('import', im)) @@ -315,10 +514,22 @@ def stack(self, scn: int = 0, prod_kind: str = 'used', cons_kind: str = 'asked') lines = [] # Stack consumptions with line if c > 0: - cons = self.agg.network().scn(scn).node(self.node).consumption().time().sort_values('cost', ascending=False) + cons = self.agg.network(self.network).scn(scn).node(self.node).consumption().time().sort_values('cost', ascending=False) for i, name in enumerate(cons.index.get_level_values('name').unique()): lines.append((name, cons.loc[name][cons_kind].sort_index().values)) + # add storage output intput + if s > 0: + stor = self.agg.network(self.network).scn(scn).node(self.node).storage().time().sort_values('cost', ascending=False) + for i, name in enumerate(stor.index.get_level_values('name').unique()): + lines.append((name, stor.loc[name]['flow_in'].sort_index().values)) + + # Add converter exportation + if ve > 0: + conv = self.agg.network(self.network).scn(scn).node(self.node).to_converter().time() + for i, name in enumerate(conv.index.get_level_values('name').unique()): + lines.append((name, conv.loc[name, 'flow'].sort_index().values)) + # Add export in consumption stack exp = np.clip(balance, 0, None) if not (exp == 0).all(): @@ -336,7 +547,8 @@ def consumption(self, name: str, kind: str = 'given') -> ConsumptionFluentAPISel :param kind: kind of data 'asked' or 'given' :return: """ - return ConsumptionFluentAPISelector(plotting=self.plotting, agg=self.agg, node=self.node, name=name, kind=kind) + return 
ConsumptionFluentAPISelector(plotting=self.plotting, agg=self.agg, + network=self.network, node=self.node, name=name, kind=kind) def production(self, name: str, kind: str = 'used') -> ProductionFluentAPISelector: """ @@ -346,7 +558,18 @@ def production(self, name: str, kind: str = 'used') -> ProductionFluentAPISelect :param kind: kind of data available ('avail') or 'used' :return: """ - return ProductionFluentAPISelector(plotting=self.plotting, agg=self.agg, node=self.node, name=name, kind=kind) + return ProductionFluentAPISelector(plotting=self.plotting, agg=self.agg, + network=self.network, node=self.node, name=name, kind=kind) + + def storage(self, name: str) -> StorageFluentAPISelector: + """ + Got o storage level of fluent API + + :param name: select storage name + :return: + """ + return StorageFluentAPISelector(plotting=self.plotting, agg=self.agg, + network=self.network, node=self.node, name=name) def link(self, dest: str, kind: str = 'used'): """ @@ -356,7 +579,26 @@ def link(self, dest: str, kind: str = 'used'): :param kind: kind of data available ('avail') or 'used' :return: """ - return LinkFluentAPISelector(plotting=self.plotting, agg=self.agg, src=self.node, dest=dest, kind=kind) + return LinkFluentAPISelector(plotting=self.plotting, agg=self.agg, + network=self.network, src=self.node, dest=dest, kind=kind) + + def to_converter(self, name: str): + """ + get a converter exportation level fluent API + :param name: + :return: + """ + return SrcConverterFluentAPISelector(plotting=self.plotting, agg=self.agg, network=self.network, + node=self.node, name=name) + + def from_converter(self, name: str): + """ + get a converter importation level fluent API + :param name: + :return: + """ + return DestConverterFluentAPISelector(plotting=self.plotting, agg=self.agg, network=self.network, + node=self.node, name=name) class NetworkFluentAPISelector(FluentAPISelector): @@ -364,13 +606,17 @@ class NetworkFluentAPISelector(FluentAPISelector): Network level of fluent 
API """ + def __init__(self, plotting: ABCElementPlotting, agg: ResultAnalyzer, network: str): + FluentAPISelector.__init__(self, plotting, agg) + self.network = network + def rac_matrix(self): """ plot RAC matrix graphics :return: """ - rac = self.agg.get_rac() + rac = self.agg.get_rac(self.network) pct = (rac >= 0).sum() / rac.size * 100 title = "RAC Matrix %0d %% passed" % pct @@ -386,14 +632,14 @@ def map(self, t: int, zoom: int, scn: int = 0, limit: int = None): :param limit: color scale limite to use :return: """ - nodes = {node: self.agg.get_balance(node=node)[scn, t] for node in self.agg.nodes} + nodes = {node: self.agg.get_balance(node=node, network=self.network)[scn, t] for node in self.agg.nodes(self.network)} if limit is None: limit = max(max(nodes.values()), -min(nodes.values())) lines = {} # Compute lines - links = self.agg.network().scn(scn).time(t).node().link() + links = self.agg.network(self.network).scn(scn).time(t).node().link() for src in links.index.get_level_values('node').unique(): for dest in links.loc[src].index.get_level_values('dest').unique(): exchange = links.loc[src, dest]['used'] # forward @@ -413,7 +659,7 @@ def node(self, node: str): :param node: node name :return: NodeFluentAPISelector """ - return NodeFluentAPISelector(plotting=self.plotting, agg=self.agg, node=node) + return NodeFluentAPISelector(plotting=self.plotting, agg=self.agg, node=node, network=self.network) class ABCPlotting(ABC): @@ -449,10 +695,11 @@ def __init__(self, agg: ResultAnalyzer, else: self.time_index = np.arange(self.agg.horizon) - def network(self): + def network(self, network: str = 'default'): """ Entry point to use fluent API. + :param network: select network to anlyze. 
Default is 'default' :return: NetworkFluentAPISelector """ - return NetworkFluentAPISelector(plotting=self.plotting, agg=self.agg) + return NetworkFluentAPISelector(plotting=self.plotting, agg=self.agg, network=network) diff --git a/hadar/viewer/html.py b/hadar/viewer/html.py index c53e544..7aaaf4e 100644 --- a/hadar/viewer/html.py +++ b/hadar/viewer/html.py @@ -98,6 +98,17 @@ def _gaussian(x, m, o): return fig + def candles(self, open: np.ndarray, close: np.ndarray, title: str): + fig = go.Figure() + text = ['%s
Begin=%d
End=%d
Flow=%d' % (t, o, c, c-o) for o, c, t in zip(open, close, self.time_index)] + fig.add_trace(go.Ohlc(x=self.time_index, open=open, high=open, low=close, close=close, + hoverinfo='text', text=text)) + + fig.update_layout(title_text=title, yaxis_title='Quantity %s' % self.unit, xaxis_rangeslider_visible=False, + xaxis_title='Time', showlegend=False) + + return fig + def stack(self, areas: List[Tuple[str, np.ndarray]], lines: List[Tuple[str, np.ndarray]], title: str): fig = go.Figure() @@ -224,9 +235,7 @@ def __init__(self, agg: ResultAnalyzer, unit_symbol: str = '', :param unit_symbol: symbol on quantity unit used. ex. MW, litter, Go, ... :param time_start: time to use as the start of study horizon :param time_end: time to use as the end of study horizon - :param cmap: matplotlib color map to use (coolwarm as default) :param node_coord: nodes coordinates to use for map plotting - :param map_element_size: size on element draw on map. default as 1. """ ABCPlotting.__init__(self, agg, unit_symbol, time_start, time_end, node_coord) self.plotting = HTMLElementPlotting(self.unit, self.time_index, self.coord) diff --git a/hadar/workflow/pipeline.py b/hadar/workflow/pipeline.py index b817cc7..e7050ee 100644 --- a/hadar/workflow/pipeline.py +++ b/hadar/workflow/pipeline.py @@ -7,10 +7,10 @@ import os from abc import ABC, abstractmethod from copy import deepcopy -from typing import List, Tuple, Union, Dict +from typing import List, Union -import pandas as pd import numpy as np +import pandas as pd from pandas import MultiIndex from hadar.optimizer.input import DTO diff --git a/hadar/workflow/shuffler.py b/hadar/workflow/shuffler.py index aaa496e..322034f 100644 --- a/hadar/workflow/shuffler.py +++ b/hadar/workflow/shuffler.py @@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. 
import multiprocessing -from typing import Dict, Union import numpy as np import pandas as pd diff --git a/tests/analyzer/test_result.py b/tests/analyzer/test_result.py index 5ed3c50..097fb99 100644 --- a/tests/analyzer/test_result.py +++ b/tests/analyzer/test_result.py @@ -10,9 +10,11 @@ import numpy as np import pandas as pd +from hadar import LPOptimizer from hadar.analyzer.result import Index, ResultAnalyzer, IntIndex from hadar.optimizer.input import Production, Consumption, Study -from hadar.optimizer.output import OutputConsumption, OutputLink, OutputNode, OutputProduction, Result +from hadar.optimizer.output import OutputConsumption, OutputLink, OutputNode, OutputProduction, Result, OutputNetwork, \ + OutputStorage, OutputConverter class TestIndex(unittest.TestCase): @@ -25,17 +27,11 @@ def test_on_element(self): self.assertEqual(False, i.all) self.assertEqual(('fr',), i.index) - def test_list_1(self): + def test_list(self): i = Index(column='i', index=['fr', 'be']) self.assertEqual(False, i.all) self.assertEqual(('fr', 'be'), i.index) - def test_list_2(self): - l = ['fr', 'be'] - i = Index(column='i', index=l) - self.assertEqual(False, i.all) - self.assertEqual(('fr', 'be'), i.index) - def test_filter(self): i = Index(column='i', index=['fr', 'be']) df = pd.DataFrame(data={'i': ['it', 'fr', 'fr', 'be', 'de', 'it', 'be'], @@ -59,37 +55,25 @@ def test_list(self): self.assertEqual((2, 6), i.index) -class TestAnalyzer(unittest.TestCase): +class TestConsumptionAnalyzer(unittest.TestCase): def setUp(self) -> None: self.study = Study(horizon=3, nb_scn=2)\ .network()\ .node('a')\ .consumption(cost=10 ** 3, quantity=[[120, 12, 12], [12, 120, 120]], name='load')\ .consumption(cost=10 ** 3, quantity=[[130, 13, 13], [13, 130, 130]], name='car')\ - .production(cost=10, quantity=[[130, 13, 13], [13, 130, 130]], name='prod')\ .node('b')\ .consumption(cost=10 ** 3, quantity=[[120, 12, 12], [12, 120, 120]], name='load')\ - .production(cost=20, quantity=[[110, 11, 11], [11, 
110, 110]], name='prod')\ - .production(cost=20, quantity=[[120, 12, 12], [12, 120, 120]], name='nuclear')\ - .node('c')\ - .link(src='a', dest='b', quantity=[[110, 11, 11], [11, 110, 110]], cost=2)\ - .link(src='a', dest='c', quantity=[[120, 12, 12], [12, 120, 120]], cost=2)\ .build() out = { - 'a': OutputNode(consumptions=[OutputConsumption(cost=10 ** 3, quantity=[[20, 2, 2], [2, 20, 20]], name='load'), - OutputConsumption(cost=10 ** 3, quantity=[[30, 3, 3], [3, 30, 30]], name='car')], - productions=[OutputProduction(cost=10, quantity=[[30, 3, 3], [3, 30, 30]], name='prod')], - links=[OutputLink(dest='b', quantity=[[10, 1, 1], [1, 10, 10]], cost=2), - OutputLink(dest='c', quantity=[[20, 2, 2], [2, 20, 20]], cost=2)]), - - 'b': OutputNode(consumptions=[OutputConsumption(cost=10 ** 3, quantity=[[20, 2, 2], [2, 20, 20]], name='load')], - productions=[OutputProduction(cost=20, quantity=[[10, 1, 1], [1, 10, 10]], name='prod'), - OutputProduction(cost=20, quantity=[[20, 2, 2], [2, 20, 20]], name='nuclear')], - links=[]) + 'a': OutputNode(consumptions=[OutputConsumption(cost=np.ones((2, 3)) * 10 ** 3, quantity=[[20, 2, 2], [2, 20, 20]], name='load'), + OutputConsumption(cost=np.ones((2, 3)) * 10 ** 3, quantity=[[30, 3, 3], [3, 30, 30]], name='car')], + productions=[], storages=[], links=[]), + 'b': OutputNode(consumptions=[OutputConsumption(cost=np.ones((2, 3)) * 10 ** 3, quantity=[[20, 2, 2], [2, 20, 20]], name='load')], + productions=[], storages=[], links=[]) } - - self.result = Result(nodes=out) + self.result = Result(networks={'default': OutputNetwork(nodes=out)}, converters={}) def test_build_consumption(self): # Expected @@ -98,6 +82,7 @@ def test_build_consumption(self): 'given': [20, 2, 2, 2, 20, 20, 30, 3, 3, 3, 30, 30, 20, 2, 2, 2, 20, 20], 'name': ['load'] * 6 + ['car'] * 6 + ['load'] * 6, 'node': ['a'] * 12 + ['b'] * 6, + 'network': ['default'] * 18, 't': [0, 1, 2] * 6, 'scn': [0, 0, 0, 1, 1, 1] * 3}, dtype=float) @@ -105,6 +90,47 @@ def 
test_build_consumption(self): pd.testing.assert_frame_equal(exp, cons) + def test_aggregate_cons(self): + # Expected + index = pd.Index(data=[0, 1, 2], dtype=float, name='t') + exp_cons = pd.DataFrame(data={'asked': [120, 12, 12], + 'cost': [10 ** 3] * 3, + 'given': [20, 2, 2]}, dtype=float, index=index) + + # Test + agg = ResultAnalyzer(study=self.study, result=self.result) + cons = agg.network().scn(0).node('a').consumption('load').time() + + pd.testing.assert_frame_equal(exp_cons, cons) + + def test_get_elements_inside(self): + agg = ResultAnalyzer(study=self.study, result=self.result) + self.assertEqual((2, 0, 0, 0, 0, 0), agg.get_elements_inside('a')) + self.assertEqual((1, 0, 0, 0, 0, 0), agg.get_elements_inside('b')) + + +class TestProductionAnalyzer(unittest.TestCase): + def setUp(self) -> None: + self.study = Study(horizon=3, nb_scn=2)\ + .network()\ + .node('a')\ + .production(cost=10, quantity=[[130, 13, 13], [13, 130, 130]], name='prod')\ + .node('b')\ + .production(cost=20, quantity=[[110, 11, 11], [11, 110, 110]], name='prod')\ + .production(cost=20, quantity=[[120, 12, 12], [12, 120, 120]], name='nuclear') \ + .build() + + out = { + 'a': OutputNode(productions=[OutputProduction(cost=np.ones((2, 3)) * 10, quantity=[[30, 3, 3], [3, 30, 30]], name='prod')], + consumptions=[], storages=[], links=[]), + + 'b': OutputNode(productions=[OutputProduction(cost=np.ones((2, 3)) * 20, quantity=[[10, 1, 1], [1, 10, 10]], name='prod'), + OutputProduction(cost=np.ones((2, 3)) * 20, quantity=[[20, 2, 2], [2, 20, 20]], name='nuclear')], + consumptions=[], storages=[], links=[]) + } + + self.result = Result(networks={'default': OutputNetwork(nodes=out)}, converters={}) + def test_build_production(self): # Expected exp = pd.DataFrame(data={'cost': [10] * 6 + [20] * 12, @@ -112,6 +138,7 @@ def test_build_production(self): 'used': [30, 3, 3, 3, 30, 30, 10, 1, 1, 1, 10, 10, 20, 2, 2, 2, 20, 20], 'name': ['prod'] * 12 + ['nuclear'] * 6, 'node': ['a'] * 6 + ['b'] * 12, + 
'network': ['default'] * 18, 't': [0, 1, 2] * 6, 'scn': [0, 0, 0, 1, 1, 1] * 3}, dtype=float) @@ -119,6 +146,110 @@ def test_build_production(self): pd.testing.assert_frame_equal(exp, prod) + def test_aggregate_prod(self): + # Expected + index = pd.MultiIndex.from_tuples((('a', 'prod', 0.0), ('a', 'prod', 1.0), ('a', 'prod', 2,0), + ('b', 'prod', 0.0), ('b', 'prod', 1.0), ('b', 'prod', 2,0)), + names=['node', 'name', 't'], ) + exp_cons = pd.DataFrame(data={'avail': [130, 13, 13, 110, 11, 11], + 'cost': [10, 10, 10, 20, 20, 20], + 'used': [30, 3, 3, 10, 1, 1]}, dtype=float, index=index) + + # Test + agg = ResultAnalyzer(study=self.study, result=self.result) + cons = agg.network().scn(0).node(['a', 'b']).production('prod').time() + + pd.testing.assert_frame_equal(exp_cons, cons) + + def test_get_elements_inside(self): + agg = ResultAnalyzer(study=self.study, result=self.result) + self.assertEqual((0, 1, 0, 0, 0, 0), agg.get_elements_inside('a')) + self.assertEqual((0, 2, 0, 0, 0, 0), agg.get_elements_inside('b')) + + +class TestStorageAnalyzer(unittest.TestCase): + def setUp(self) -> None: + self.study = Study(horizon=3, nb_scn=2)\ + .network()\ + .node('b')\ + .storage(name='store', capacity=100, flow_in=10, flow_out=20, cost=1) \ + .build() + + out = { + 'b': OutputNode(storages=[OutputStorage(name='store', capacity=[[10, 1, 1], [1, 10, 10]], + flow_out=[[20, 2, 2], [2, 20, 20]], + flow_in=[[30, 3, 3], [3, 30, 30]])], + consumptions=[], productions=[], links=[]) + } + + self.result = Result(networks={'default': OutputNetwork(nodes=out)}, converters={}) + + def test_build_storage(self): + # Expected + exp = pd.DataFrame(data={'max_capacity': [100] * 6, + 'capacity': [10, 1, 1, 1, 10, 10], + 'max_flow_in': [10] * 6, + 'flow_in': [30, 3, 3, 3, 30, 30], + 'max_flow_out': [20] * 6, + 'flow_out': [20, 2, 2, 2, 20, 20], + 'cost': [1] * 6, + 'init_capacity': [0] * 6, + 'eff': [.99] * 6, + 'name': ['store'] * 6, + 'node': ['b'] * 6, + 'network': ['default'] * 6, + 't': [0, 
1, 2] * 2, + 'scn': [0, 0, 0, 1, 1, 1]}, dtype=float) + + stor = ResultAnalyzer._build_storage(self.study, self.result) + pd.testing.assert_frame_equal(exp, stor, check_dtype=False) + + def test_aggregate_stor(self): + # Expected + index = pd.MultiIndex.from_tuples((('b', 'store', 0), ('b', 'store', 1), ('b', 'store', 2)), + names=['node', 'name', 't'], ) + exp_stor = pd.DataFrame(data={'capacity': [10, 1, 1], + 'cost': [1, 1, 1], + 'eff': [.99] * 3, + 'flow_in': [30, 3, 3], + 'flow_out': [20, 2, 2], + 'init_capacity': [0] * 3, + 'max_capacity': [100] * 3, + 'max_flow_in': [10] * 3, + 'max_flow_out': [20] * 3}, index=index) + + # Test + agg = ResultAnalyzer(study=self.study, result=self.result) + stor = agg.network().scn(0).node().storage('store').time() + pd.testing.assert_frame_equal(exp_stor, stor, check_dtype=False) + + def test_get_elements_inside(self): + agg = ResultAnalyzer(study=self.study, result=self.result) + self.assertEqual((0, 0, 1, 0, 0, 0), agg.get_elements_inside('b')) + + +class TestLinkAnalyzer(unittest.TestCase): + def setUp(self) -> None: + self.study = Study(horizon=3, nb_scn=2)\ + .network()\ + .node('a')\ + .node('b')\ + .node('c')\ + .link(src='a', dest='b', quantity=[[110, 11, 11], [11, 110, 110]], cost=2)\ + .link(src='a', dest='c', quantity=[[120, 12, 12], [12, 120, 120]], cost=2)\ + .build() + + blank_node = OutputNode(consumptions=[], productions=[], storages=[], links=[]) + out = { + 'a': OutputNode(consumptions=[], productions=[], storages=[], + links=[OutputLink(dest='b', quantity=[[10, 1, 1], [1, 10, 10]], cost=np.ones((2, 3)) * 2), + OutputLink(dest='c', quantity=[[20, 2, 2], [2, 20, 20]], cost=np.ones((2, 3)) * 2)]), + + 'b': blank_node, 'c': blank_node + } + + self.result = Result(networks={'default': OutputNetwork(nodes=out)}, converters={}) + def test_build_link(self): # Expected exp = pd.DataFrame(data={'cost': [2] * 12, @@ -126,6 +257,7 @@ def test_build_link(self): 'used': [10, 1, 1, 1, 10, 10, 20, 2, 2, 2, 20, 20], 
'node': ['a'] * 12, 'dest': ['b'] * 6 + ['c'] * 6, + 'network': ['default'] * 12, 't': [0, 1, 2] * 4, 'scn': [0, 0, 0, 1, 1, 1] * 2}, dtype=float) @@ -133,61 +265,134 @@ def test_build_link(self): pd.testing.assert_frame_equal(exp, link) - def test_aggregate_cons(self): + def test_aggregate_link(self): # Expected - index = pd.Index(data=[0, 1, 2], dtype=float, name='t') - exp_cons = pd.DataFrame(data={'asked': [120, 12, 12], - 'cost': [10 ** 3] * 3, - 'given': [20, 2, 2]}, dtype=float, index=index) + index = pd.MultiIndex.from_tuples((('b', 0.0), ('b', 1.0), ('b', 2,0), + ('c', 0.0), ('c', 1.0), ('c', 2,0)), + names=['dest', 't'], ) + exp_link = pd.DataFrame(data={'avail': [110, 11, 11, 120, 12, 12], + 'cost': [2, 2, 2, 2, 2, 2], + 'used': [10, 1, 1, 20, 2, 2]}, dtype=float, index=index) agg = ResultAnalyzer(study=self.study, result=self.result) - cons = agg.network().scn(0).node('a').consumption('load').time() + link = agg.network().scn(0).node('a').link(['b', 'c']).time() - pd.testing.assert_frame_equal(exp_cons, cons) + pd.testing.assert_frame_equal(exp_link, link) - def test_aggregate_prod(self): - # Expected - index = pd.MultiIndex.from_tuples((('a', 'prod', 0.0), ('a', 'prod', 1.0), ('a', 'prod', 2,0), - ('b', 'prod', 0.0), ('b', 'prod', 1.0), ('b', 'prod', 2,0)), - names=['node', 'name', 't'], ) - exp_cons = pd.DataFrame(data={'avail': [130, 13, 13, 110, 11, 11], - 'cost': [10, 10, 10, 20, 20, 20], - 'used': [30, 3, 3, 10, 1, 1]}, dtype=float, index=index) + def test_balance(self): + agg = ResultAnalyzer(study=self.study, result=self.result) + np.testing.assert_array_equal([[30, 3, 3], [3, 30, 30]], agg.get_balance(node='a')) + np.testing.assert_array_equal([[-10, -1, -1], [-1, -10, -10]], agg.get_balance(node='b')) + def test_get_elements_inside(self): agg = ResultAnalyzer(study=self.study, result=self.result) - cons = agg.network().scn(0).node(['a', 'b']).production('prod').time() + self.assertEqual((0, 0, 0, 2, 0, 0), agg.get_elements_inside('a')) - 
pd.testing.assert_frame_equal(exp_cons, cons) - def test_aggregate_link(self): +class TestConverterAnalyzer(unittest.TestCase): + def setUp(self) -> None: + self.study = Study(horizon=3, nb_scn=2)\ + .network()\ + .node('a')\ + .to_converter(name='conv', ratio=2)\ + .network('elec').node('a')\ + .converter(name='conv', to_network='elec', to_node='a', max=10, cost=1)\ + .build() + + conv = OutputConverter(name='conv', flow_src={('default', 'a'): [[10, 1, 1], [1, 10, 10]]}, flow_dest=[[20, 2, 2], [2, 20, 20]]) + + blank_node = OutputNode(consumptions=[], productions=[], storages=[], links=[]) + self.result = Result(networks={'default': OutputNetwork(nodes={'a': blank_node}), + 'elec': OutputNetwork(nodes={'a': blank_node})}, + converters={'conv': conv}) + + def test_build_dest_converter(self): # Expected - index = pd.MultiIndex.from_tuples((('b', 0.0), ('b', 1.0), ('b', 2,0), - ('c', 0.0), ('c', 1.0), ('c', 2,0)), - names=['dest', 't'], ) - exp_cons = pd.DataFrame(data={'avail': [110, 11, 11, 120, 12, 12], - 'cost': [2, 2, 2, 2, 2, 2], - 'used': [10, 1, 1, 20, 2, 2]}, dtype=float, index=index) + exp = pd.DataFrame(data={'name': ['conv'] * 6, + 'network': ['elec'] * 6, + 'node': ['a'] * 6, + 'flow': [20, 2, 2, 2, 20, 20], + 'cost': [1] * 6, + 'max': [10] * 6, + 't': [0, 1, 2] * 2, + 'scn': [0, 0, 0, 1, 1, 1]}) + + conv = ResultAnalyzer._build_dest_converter(self.study, self.result) + + pd.testing.assert_frame_equal(exp, conv, check_dtype=False) + + def test_build_src_converter(self): + # Expected + exp = pd.DataFrame(data={'name': ['conv'] * 6, + 'network': ['default'] * 6, + 'node': ['a'] * 6, + 'ratio': [2] * 6, + 'flow': [10, 1, 1, 1, 10, 10], + 'max': [5] * 6, + 't': [0, 1, 2] * 2, + 'scn': [0, 0, 0, 1, 1, 1]}) + + conv = ResultAnalyzer._build_src_converter(self.study, self.result) + + pd.testing.assert_frame_equal(exp, conv, check_dtype=False) + + + def test_aggregate_to_conv(self): + # Expected + exp_conv = pd.DataFrame(data={'flow': [10, 1, 1], + 'max': [5] * 
3, + 'ratio': [2] * 3}, index=pd.Index([0, 1, 2], name='t')) agg = ResultAnalyzer(study=self.study, result=self.result) - cons = agg.network().scn(0).node('a').link(['b', 'c']).time() + conv = agg.network().scn(0).node('a').to_converter('conv').time() - pd.testing.assert_frame_equal(exp_cons, cons) + pd.testing.assert_frame_equal(exp_conv, conv, check_dtype=False) + + def test_aggregate_from_conv(self): + # Expected + exp_conv = pd.DataFrame(data={'cost': [1] * 3, + 'flow': [20, 2, 2], + 'max': [10] * 3}, index=pd.Index([0, 1, 2], name='t')) - def test_get_elements_inide(self): agg = ResultAnalyzer(study=self.study, result=self.result) - self.assertEqual((2, 1, 2), agg.get_elements_inside('a')) - self.assertEqual((1, 2, 0), agg.get_elements_inside('b')) + conv = agg.network('elec').scn(0).node('a').from_converter('conv').time() - def test_balance(self): + pd.testing.assert_frame_equal(exp_conv, conv, check_dtype=False) + + def test_get_elements_inside(self): agg = ResultAnalyzer(study=self.study, result=self.result) - np.testing.assert_array_equal([[30, 3, 3], [3, 30, 30]], agg.get_balance(node='a')) - np.testing.assert_array_equal([[-10, -1, -1], [-1, -10, -10]], agg.get_balance(node='b')) + self.assertEqual((0, 0, 0, 0, 1, 0), agg.get_elements_inside('a')) + self.assertEqual((0, 0, 0, 0, 0, 1), agg.get_elements_inside('a', network='elec')) + + +class TestAnalyzer(unittest.TestCase): + def setUp(self) -> None: + self.study = Study(horizon=1)\ + .network()\ + .node('a')\ + .consumption(cost=10 ** 3, quantity=100, name='car')\ + .production(cost=10, quantity=70, name='prod')\ + .node('b')\ + .production(cost=20, quantity=70, name='nuclear') \ + .storage(name='store', capacity=100, flow_in=10, flow_out=20, cost=-1) \ + .to_converter(name='conv', ratio=2) \ + .link(src='b', dest='a', quantity=110, cost=2)\ + .network('elec')\ + .node('a')\ + .consumption(cost=10 ** 3, quantity=20, name='load')\ + .converter(name='conv', to_network='elec', to_node='a', max=10, cost=1)\ 
+ .build() + + optim = LPOptimizer() + self.result = optim.solve(self.study) def test_cost(self): agg = ResultAnalyzer(study=self.study, result=self.result) - np.testing.assert_array_equal([[200360, 20036, 20036], [20036, 200360, 200360]], agg.get_cost(node='a')) - np.testing.assert_array_equal([[100600, 10060, 10060], [10060, 100600, 100600]], agg.get_cost(node='b')) + np.testing.assert_array_equal(700, agg.get_cost(node='a')) + np.testing.assert_array_equal(760, agg.get_cost(node='b')) + np.testing.assert_array_equal(10010, agg.get_cost(node='a', network='elec')) def test_rac(self): agg = ResultAnalyzer(study=self.study, result=self.result) - np.testing.assert_array_equal([[0, 0, 0], [0, 0, 0]], agg.get_rac()) + np.testing.assert_array_equal(35, agg.get_rac()) + np.testing.assert_array_equal(-10, agg.get_rac(network='elec')) diff --git a/tests/optimizer/it/test_optimizer.py b/tests/optimizer/it/test_optimizer.py index 2bcc341..a752a67 100644 --- a/tests/optimizer/it/test_optimizer.py +++ b/tests/optimizer/it/test_optimizer.py @@ -8,7 +8,8 @@ import unittest import hadar as hd -from tests.utils import assert_study +from hadar.optimizer.output import OutputNetwork +from tests.utils import assert_result class TestOptimizer(unittest.TestCase): @@ -50,10 +51,11 @@ def test_merit_order(self): hd.OutputProduction(name='nuclear', cost=20, quantity=[[15, 3, 3], [3, 15, 15]]), hd.OutputProduction(name='solar', cost=10, quantity=[[10, 2, 2], [2, 10, 10]]), hd.OutputProduction(name='oil', cost=30, quantity=[[5, 1, 1], [1, 5, 5]])], + storages=[], links=[]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) def test_exchange_two_nodes(self): """ @@ -86,15 +88,17 @@ def test_exchange_two_nodes(self): nodes_expected['a'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[20, 200]], name='load')], 
productions=[hd.OutputProduction(cost=10, quantity=[[30, 300]], name='prod')], + storages=[], links=[hd.OutputLink(dest='b', quantity=[[10, 100]], cost=2)]) nodes_expected['b'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[20, 200]], name='load')], productions=[hd.OutputProduction(cost=20, quantity=[[10, 100]], name='prod')], + storages=[], links=[]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) def test_exchange_two_concurrent_nodes(self): """ @@ -136,22 +140,25 @@ def test_exchange_two_concurrent_nodes(self): nodes_expected['a'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], productions=[hd.OutputProduction(cost=10, quantity=[[30]], name='nuclear')], + storages=[], links=[hd.OutputLink(dest='b', quantity=[[10]], cost=2), hd.OutputLink(dest='c', quantity=[[10]], cost=2)]) nodes_expected['b'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], productions=[hd.OutputProduction(cost=20, quantity=[[0]], name='nuclear')], + storages=[], links=[]) nodes_expected['c'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], productions=[hd.OutputProduction(cost=20, quantity=[[0]], name='nuclear')], + storages=[], links=[]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) def test_exchange_link_saturation(self): """ @@ -178,21 +185,23 @@ def test_exchange_link_saturation(self): nodes_expected = {} nodes_expected['a'] = hd.OutputNode(productions=[hd.OutputProduction(cost=10, quantity=[[20]], name='nuclear')], links=[hd.OutputLink(dest='b', quantity=[[20]], cost=2)], - consumptions=[]) + storages=[], consumptions=[]) 
nodes_expected['b'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], links=[hd.OutputLink(dest='c', quantity=[[10]], cost=2)], + storages=[], productions=[]) nodes_expected['c'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], productions=[], + storages=[], links=[]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) def test_consumer_cancel_exchange(self): """ @@ -229,21 +238,24 @@ def test_consumer_cancel_exchange(self): nodes_expected['a'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10]], name='load')], productions=[hd.OutputProduction(cost=10, quantity=[[20]], name='nuclear')], + storages=[], links=[hd.OutputLink(dest='b', quantity=[[10]], cost=2)]) nodes_expected['b'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[5]], name='load')], productions=[hd.OutputProduction(cost=20, quantity=[[5]], name='nuclear')], + storages=[], links=[hd.OutputLink(dest='c', quantity=[[10]], cost=2)]) nodes_expected['c'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[20]], name='load')], productions=[hd.OutputProduction(cost=10, quantity=[[10]], name='nuclear')], + storages=[], links=[]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) def test_many_links_on_node(self): @@ -292,16 +304,86 @@ def test_many_links_on_node(self): nodes_expected['a'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[10, 10]], name='load')], productions=[hd.OutputProduction(cost=80, quantity=[[0, 5]], name='gas')], - links=[hd.OutputLink(dest='b', quantity=[[0, 10]], cost=10)]) + storages=[], 
links=[hd.OutputLink(dest='b', quantity=[[0, 10]], cost=10)]) nodes_expected['b'] = hd.OutputNode( consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[15, 25]], name='load')], - productions=[], links=[]) + storages=[], productions=[], links=[]) nodes_expected['c'] = hd.OutputNode( productions=[hd.OutputProduction(cost=50, quantity=[[25, 30]], name='nuclear')], - links=[], consumptions=[]) + storages=[], links=[], consumptions=[]) + + res = self.optimizer.solve(study) + + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) + + def test_storage(self): + """ + Capacity + | A | --------> | B | + | nuclear: 10 @ 20 | 10 | load: 20, 10, 0, 10 | + | | | storage: 0 @ 30 | + :return: + """ + + study = hd.Study(horizon=4)\ + .network()\ + .node('a')\ + .production(name='nuclear', cost=20, quantity=[10, 10, 10, 0]) \ + .node('b')\ + .consumption(name='load', cost=10 ** 6, quantity=[20, 10, 0, 10]) \ + .storage(name='cell', capacity=30, flow_in=20, flow_out=20, + init_capacity=15, eff=.5)\ + .link(src='a', dest='b', cost=1, quantity=10)\ + .build() + + nodes_expected = dict() + nodes_expected['a'] = hd.OutputNode( + productions=[hd.OutputProduction(cost=20, quantity=[[10, 10, 10, 0]], name='nuclear')], + storages=[], consumptions=[], links=[hd.OutputLink(dest='b', quantity=[[10, 10, 10, 0]], cost=1)]) + + nodes_expected['b'] = hd.OutputNode( + consumptions=[hd.OutputConsumption(cost=10 ** 6, quantity=[[20, 10, 0, 10]], name='load')], + storages=[hd.OutputStorage(name='cell', capacity=[[5, 5, 10, 0]], + flow_in=[[0, 0, 10, 0]], flow_out=[[10, 0, 0, 10]])], + productions=[], links=[]) + + res = self.optimizer.solve(study) + + assert_result(self, hd.Result(networks={'default': OutputNetwork(nodes_expected)}, converters={}), res) + + def test_multi_energies(self): + study = hd.Study(horizon=1)\ + .network('elec')\ + .node('a')\ + .consumption(name='load', cost=10**6, quantity=10)\ + .network('gas')\ + .node('b')\ + 
.production(name='central', cost=10, quantity=50)\ + .to_converter(name='conv', ratio=0.8)\ + .network('coat')\ + .node('c')\ + .production(name='central', cost=10, quantity=50)\ + .to_converter(name='conv', ratio=0.5)\ + .converter(name='conv', to_network='elec', to_node='a', max=50)\ + .build() + + networks_expected = dict() + networks_expected['elec'] = hd.OutputNetwork(nodes={'a': hd.OutputNode( + consumptions=[hd.OutputConsumption(cost=10**6, quantity=[[10]], name='load')], + storages=[], productions=[], links=[])}) + + networks_expected['gas'] = hd.OutputNetwork(nodes={'b': hd.OutputNode( + productions=[hd.OutputProduction(cost=10, quantity=[[12.5]], name='central')], + storages=[], consumptions=[], links=[])}) + + networks_expected['coat'] = hd.OutputNetwork(nodes={'c': hd.OutputNode( + productions=[hd.OutputProduction(cost=10, quantity=[[20]], name='central')], + storages=[], consumptions=[], links=[])}) + + converter_expected = hd.OutputConverter(name='conv', flow_src={('gas', 'b'): [[12.5]], ('coat', 'c'): [[20]]}, flow_dest=[[10]]) res = self.optimizer.solve(study) - assert_study(self, hd.Result(nodes_expected), res) \ No newline at end of file + assert_result(self, hd.Result(networks=networks_expected, converters={'conv': converter_expected}), res) \ No newline at end of file diff --git a/tests/optimizer/lp/test_mapper.py b/tests/optimizer/lp/test_mapper.py index 0609550..23b4b5e 100644 --- a/tests/optimizer/lp/test_mapper.py +++ b/tests/optimizer/lp/test_mapper.py @@ -8,23 +8,104 @@ import unittest from hadar.optimizer.input import Production, Consumption, Study -from hadar.optimizer.lp.domain import LPLink, LPConsumption, LPProduction, LPNode +from hadar.optimizer.lp.domain import LPLink, LPConsumption, LPProduction, LPNode, LPStorage, LPConverter, LPNetwork from hadar.optimizer.lp.mapper import InputMapper, OutputMapper -from hadar.optimizer.output import OutputConsumption, OutputLink, OutputNode, OutputProduction, Result +from hadar.optimizer.output 
import OutputConsumption, OutputLink, OutputNode, OutputProduction, Result, OutputNetwork, \ + OutputStorage, OutputConverter from tests.optimizer.lp.ortools_mock import MockSolver, MockNumVar -from tests.utils import assert_study +from tests.utils import assert_result class TestInputMapper(unittest.TestCase): - def test_map_input(self): + def test_map_consumption(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network()\ + .node('a')\ + .consumption(name='load', quantity=[[10, 1], [20, 2]], cost=[[.01, .1], [.02, .2]])\ + .build() + + s = MockSolver() + + mapper = InputMapper(solver=s, study=study) + + # Expected + suffix = 'inside network=default on node=a at t=0 for scn=0' + out_cons_0 = [LPConsumption(name='load', cost=.01, quantity=10, variable=MockNumVar(0, 10, 'lol=load %s' % suffix))] + out_node_0 = LPNode(consumptions=out_cons_0, productions=[], storages=[], links=[]) + + self.assertEqual(out_node_0, mapper.get_node_var(network='default', node='a', t=0, scn=0)) + + suffix = 'inside network=default on node=a at t=1 for scn=1' + out_cons_1 = [LPConsumption(name='load', cost=.2, quantity=2, variable=MockNumVar(0, 2, 'lol=load %s' % suffix))] + out_node_1 = LPNode(consumptions=out_cons_1, productions=[], storages=[], links=[]) + + self.assertEqual(out_node_1, mapper.get_node_var(network='default', node='a', t=1, scn=1)) + + def test_map_production(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network() \ + .node('a') \ + .production(name='nuclear', quantity=[[12, 2], [21, 20]], cost=[[0.12, 0.2], [0.21, 0.02]]) \ + .build() + + s = MockSolver() + + mapper = InputMapper(solver=s, study=study) + + # Expected + suffix = 'inside network=default on node=a at t=0 for scn=0' + out_prod_0 = [LPProduction(name='nuclear', cost=0.12, quantity=12, variable=MockNumVar(0, 12.0, 'prod=nuclear %s' % suffix))] + out_node_0 = LPNode(consumptions=[], productions=out_prod_0, storages=[], links=[]) + + self.assertEqual(out_node_0, 
mapper.get_node_var(network='default', node='a', t=0, scn=0)) + + suffix = 'inside network=default on node=a at t=1 for scn=1' + + out_prod_1 = [LPProduction(name='nuclear', cost=.02, quantity=20, variable=MockNumVar(0, 20.0, 'prod=nuclear %s' % suffix))] + out_node_1 = LPNode(consumptions=[], productions=out_prod_1, storages=[], links=[]) + + self.assertEqual(out_node_1, mapper.get_node_var(network='default', node='a', t=1, scn=1)) + + def test_map_storage(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network()\ + .node('a')\ + .storage(name='cell', capacity=10, flow_in=1, flow_out=1, cost=1, init_capacity=2, eff=.9) \ + .build() + + s = MockSolver() + + mapper = InputMapper(solver=s, study=study) + + # Expected + suffix = 'inside network=default on node=a at t=0 for scn=0' + out_stor_0 = [LPStorage(name='cell', capacity=10, var_capacity=MockNumVar(0, 10, 'storage_capacity=cell %s' % suffix), + flow_in=1, var_flow_in=MockNumVar(0, 1, 'storage_flow_in=cell %s' % suffix), + flow_out=1, var_flow_out=MockNumVar(0, 1, 'storage_flow_out=cell %s' % suffix), + cost=1, init_capacity=2, eff=.9)] + out_node_0 = LPNode(consumptions=[], productions=[], storages=out_stor_0, links=[]) + + self.assertEqual(out_node_0, mapper.get_node_var(network='default', node='a', t=0, scn=0)) + + suffix = 'inside network=default on node=a at t=1 for scn=1' + out_stor_1 = [LPStorage(name='cell', capacity=10, var_capacity=MockNumVar(0, 10, 'storage_capacity=cell %s' % suffix), + flow_in=1, var_flow_in=MockNumVar(0, 1, 'storage_flow_in=cell %s' % suffix), + flow_out=1, var_flow_out=MockNumVar(0, 1, 'storage_flow_out=cell %s' % suffix), + cost=1, init_capacity=2, eff=.9)] + out_node_1 = LPNode(consumptions=[], productions=[], storages=out_stor_1, links=[]) + + self.assertEqual(out_node_1, mapper.get_node_var(network='default', node='a', t=1, scn=1)) + + def test_map_links(self): # Input study = Study(horizon=2, nb_scn=2) \ .network()\ .node('a')\ - .consumption(name='load', 
quantity=[[10, 1], [20, 2]], cost=10)\ - .production(name='nuclear', quantity=[[12, 2], [21, 20]], cost=10)\ .node('be')\ - .link(src='a', dest='be', quantity=[[10, 3], [20, 30]], cost=2)\ + .link(src='a', dest='be', quantity=[[10, 3], [20, 30]], cost=[[.01, .3], [.02, .03]])\ .build() s = MockSolver() @@ -32,57 +113,176 @@ def test_map_input(self): mapper = InputMapper(solver=s, study=study) # Expected - out_cons_0 = [LPConsumption(name='load', cost=10, quantity=10, variable=MockNumVar(0, 10, 'lol load on a at t=0 for scn=0'))] - out_prod_0 = [LPProduction(name='nuclear', cost=10, quantity=12, variable=MockNumVar(0, 12.0, 'prod nuclear on a at t=0 for scn=0'))] + suffix = 'inside network=default on node=a at t=0 for scn=0' + out_link_0 = [LPLink(src='a', dest='be', cost=.01, quantity=10, variable=MockNumVar(0, 10.0, 'link=be %s' % suffix))] + out_node_0 = LPNode(consumptions=[], productions=[], storages=[], links=out_link_0) - out_link_0 = [LPLink(src='a', dest='be', cost=2, quantity=10, variable=MockNumVar(0, 10.0, 'link on a to be at t=0 for scn=0'))] - out_node_0 = LPNode(consumptions=out_cons_0, productions=out_prod_0, links=out_link_0) + self.assertEqual(out_node_0, mapper.get_node_var(network='default', node='a', t=0, scn=0)) - self.assertEqual(out_node_0, mapper.get_var(name='a', t=0, scn=0)) + suffix = 'inside network=default on node=a at t=1 for scn=1' + out_link_1 = [LPLink(src='a', dest='be', cost=.03, quantity=30, variable=MockNumVar(0, 30.0, 'link=be %s' % suffix))] + out_node_1 = LPNode(consumptions=[], productions=[], storages=[],links=out_link_1) - out_cons_1 = [LPConsumption(name='load', cost=10, quantity=2, variable=MockNumVar(0, 2, 'lol load on a at t=1 for scn=1'))] - out_prod_1 = [LPProduction(name='nuclear', cost=10, quantity=20, variable=MockNumVar(0, 20.0, 'prod nuclear on a at t=1 for scn=1'))] + self.assertEqual(out_node_1, mapper.get_node_var(network='default', node='a', t=1, scn=1)) - out_link_1 = [LPLink(src='a', dest='be', cost=2, 
quantity=30, variable=MockNumVar(0, 30.0, 'link on a to be at t=1 for scn=1'))] - out_node_1 = LPNode(consumptions=out_cons_1, productions=out_prod_1, links=out_link_1) + def test_map_converter(self): + # Mock + s = MockSolver() - self.assertEqual(out_node_1, mapper.get_var(name='a', t=1, scn=1)) + # Input + study = Study(horizon=1)\ + .network('gas')\ + .node('a')\ + .to_converter(name='conv', ratio=.5)\ + .network()\ + .node('b')\ + .converter(name='conv', to_network='default', to_node='b', max=100)\ + .build() + + mapper = InputMapper(solver=s, study=study) + + # Expected + suffix = 'at t=0 for scn=0' + out_conv_0 = LPConverter(name='conv', src_ratios={('gas', 'a'): 0.5}, dest_network='default', dest_node='b', + cost=0, max=100, + var_flow_dest=MockNumVar(0, 100, 'flow_dest conv %s' % suffix), + var_flow_src={('gas', 'a'): MockNumVar(0, 200, 'flow_src conv gas:a %s' % suffix)}) + + self.assertEqual(out_conv_0, mapper.get_conv_var(name='conv', t=0, scn=0)) class TestOutputMapper(unittest.TestCase): - def test_map_output(self): + def test_map_consumption(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network()\ + .node('a')\ + .consumption(name='load', quantity=[[10, 1], [20, 2]], cost=[[.01, .1], [.02, .2]])\ + .build() + + mapper = OutputMapper(study=study) + + out_cons_0 = [LPConsumption(name='load', cost=.01, quantity=10, variable=MockNumVar(0, 5, ''))] + mapper.set_node_var(network='default', node='a', t=0, scn=0, + vars=LPNode(consumptions=out_cons_0, productions=[], storages=[], links=[])) + + out_cons_1 = [LPConsumption(name='load', cost=.2, quantity=20, variable=MockNumVar(0, 5, ''))] + mapper.set_node_var(network='default', node='a', t=1, scn=1, + vars=LPNode(consumptions=out_cons_1, productions=[], storages=[], links=[])) + + # Expected + cons = OutputConsumption(name='load', quantity=[[5, 0], [0, 15]], cost=[[.01, .1], [.02, .2]]) + nodes = {'a': OutputNode(consumptions=[cons], productions=[], storages=[], links=[])} + expected = 
Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) + + assert_result(self, expected=expected, result=mapper.get_result()) + + def test_map_production(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network()\ + .node('a')\ + .production(name='nuclear', quantity=[[12, 2], [21, 20]], cost=[[0.12, 0.2], [0.21, 0.02]]) \ + .build() + + mapper = OutputMapper(study=study) + + out_prod_0 = [LPProduction(name='nuclear', cost=.12, quantity=12, variable=MockNumVar(0, 12, ''))] + mapper.set_node_var(network='default', node='a', t=0, scn=0, + vars=LPNode(consumptions=[], productions=out_prod_0, storages=[], links=[])) + + out_prod_1 = [LPProduction(name='nuclear', cost=.21, quantity=2, variable=MockNumVar(0, 112, ''))] + mapper.set_node_var(network='default', node='a', t=1, scn=1, + vars=LPNode(consumptions=[], productions=out_prod_1, storages=[], links=[])) + + # Expected + prod = OutputProduction(name='nuclear', quantity=[[12, 0], [0, 112]], cost=[[0.12, 0.2], [0.21, 0.02]]) + nodes = {'a': OutputNode(consumptions=[], productions=[prod], storages=[], links=[])} + expected = Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) + + assert_result(self, expected=expected, result=mapper.get_result()) + + def test_map_storage(self): + # Input + study = Study(horizon=2, nb_scn=2) \ + .network()\ + .node('a')\ + .storage(name='cell', capacity=10, flow_in=1, flow_out=1, cost=1, init_capacity=2, eff=.9) \ + .build() + + mapper = OutputMapper(study=study) + + out_stor_0 = [LPStorage(name='cell', capacity=10, flow_in=1, flow_out=1, init_capacity=2, eff=.9, cost=1, + var_capacity=MockNumVar(0, 5, ''), + var_flow_in=MockNumVar(0, 2, ''), + var_flow_out=MockNumVar(0, 4, ''))] + mapper.set_node_var(network='default', node='a', t=0, scn=0, + vars=LPNode(consumptions=[], productions=[], storages=out_stor_0, links=[])) + + out_stor_1 = [LPStorage(name='cell', capacity=10, flow_in=1, flow_out=1, init_capacity=2, eff=.9, cost=1, + 
var_capacity=MockNumVar(0, 55, ''), + var_flow_in=MockNumVar(0, 22, ''), + var_flow_out=MockNumVar(0, 44, ''))] + mapper.set_node_var(network='default', node='a', t=1, scn=1, + vars=LPNode(consumptions=[], productions=[], storages=out_stor_1, links=[])) + + # Expected + stor = OutputStorage(name='cell', capacity=[[5, 0], [0, 55]], flow_in=[[2, 0], [0, 22]], flow_out=[[4, 0], [0, 44]]) + nodes = {'a': OutputNode(consumptions=[], productions=[], storages=[stor], links=[])} + expected = Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) + + assert_result(self, expected=expected, result=mapper.get_result()) + + def test_map_link(self): # Input study = Study(horizon=2, nb_scn=2) \ .network()\ .node('a')\ - .consumption(name='load', quantity=[[10, 1], [20, 2]], cost=10)\ - .production(name='nuclear', quantity=[[12, 2], [21, 20]], cost=10)\ .node('be')\ - .link(src='a', dest='be', quantity=[[10, 3], [20, 30]], cost=2)\ + .link(src='a', dest='be', quantity=[[10, 3], [20, 30]], cost=[[.01, .3], [.02, .03]])\ .build() - s = MockSolver() mapper = OutputMapper(study=study) - out_cons_0 = [LPConsumption(name='load', cost=10, quantity=10, variable=MockNumVar(0, 5, ''))] - out_prod_0 = [LPProduction(name='nuclear', cost=10, quantity=12, variable=MockNumVar(0, 12, ''))] + out_link_0 = [LPLink(src='a', dest='be', cost=.01, quantity=10, variable=MockNumVar(0, 8, ''))] + mapper.set_node_var(network='default', node='a', t=0, scn=0, + vars=LPNode(consumptions=[], productions=[], storages=[], links=out_link_0)) + + out_link_1 = [LPLink(src='a', dest='be', cost=.02, quantity=10, variable=MockNumVar(0, 18, ''))] + mapper.set_node_var(network='default', node='a', t=1, scn=1, + vars=LPNode(consumptions=[], productions=[], storages=[], links=out_link_1)) - out_link_0 = [LPLink(src='a', dest='be', cost=2, quantity=10, variable=MockNumVar(0, 8, ''))] - mapper.set_var(name='a', t=0, scn=0, - vars=LPNode(consumptions=out_cons_0, productions=out_prod_0, links=out_link_0)) + # 
Expected + link = OutputLink(dest='be', quantity=[[8, 0], [0, 18]], cost=[[.01, .3], [.02, .03]]) + nodes = {'a': OutputNode(consumptions=[], productions=[], storages=[], links=[link]), + 'be': OutputNode(consumptions=[], productions=[], storages=[], links=[])} + expected = Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) - out_cons_1 = [LPConsumption(name='load', cost=10, quantity=20, variable=MockNumVar(0, 5, ''))] - out_prod_1 = [LPProduction(name='nuclear', cost=10, quantity=2, variable=MockNumVar(0, 112, ''))] + assert_result(self, expected=expected, result=mapper.get_result()) - out_link_1 = [LPLink(src='a', dest='be', cost=2, quantity=10, variable=MockNumVar(0, 18, ''))] - mapper.set_var(name='a', t=1, scn=1, - vars=LPNode(consumptions=out_cons_1, productions=out_prod_1, links=out_link_1)) + def test_map_converter(self): + # Input + study = Study(horizon=1)\ + .network('gas')\ + .node('a')\ + .to_converter(name='conv', ratio=.5)\ + .network()\ + .node('b')\ + .converter(name='conv', to_network='default', to_node='b', max=100)\ + .build() # Expected - node = OutputNode(consumptions=[OutputConsumption(name='load', quantity=[[5, 0], [0, 15]], cost=10)], - productions=[OutputProduction(name='nuclear', quantity=[[12, 0], [0, 112]], cost=10)], - links=[OutputLink(dest='be', quantity=[[8, 0], [0, 18]], cost=2)]) - expected = Result(nodes={'a': node, 'be': OutputNode(consumptions=[], productions=[], links=[])}) + exp = OutputConverter(name='conv', flow_src={('gas', 'a'): [[200]]}, flow_dest=[[100]]) + blank_node = OutputNode(consumptions=[], productions=[], storages=[], links=[]) + mapper = OutputMapper(study=study) + vars = LPConverter(name='conv', src_ratios={('gas', 'a'): 0.5}, dest_network='default', dest_node='b', + cost=0, max=100, + var_flow_dest=MockNumVar(0, 100, 'flow_dest conv %s'), + var_flow_src={('gas', 'a'): MockNumVar(0, 200, 'flow_src conv gas:a %s')}) + mapper.set_converter_var(name='conv', t=0, scn=0, vars=vars) + res = 
mapper.get_result() + self.assertEqual(Result(networks={'gas': OutputNetwork(nodes={'a': blank_node}), + 'default': OutputNetwork(nodes={'b': blank_node})}, + converters={'conv': exp}), res) - assert_study(self, expected=expected, result=mapper.get_result()) diff --git a/tests/optimizer/lp/test_optimizer.py b/tests/optimizer/lp/test_optimizer.py index 96ae57e..4f972ac 100644 --- a/tests/optimizer/lp/test_optimizer.py +++ b/tests/optimizer/lp/test_optimizer.py @@ -6,14 +6,16 @@ # This file is part of hadar-simulator, a python adequacy library for everyone. import pickle import unittest -from unittest.mock import MagicMock +from unittest.mock import MagicMock, call, ANY from hadar.optimizer.input import Study, Consumption -from hadar.optimizer.lp.domain import LPConsumption, LPProduction, LPLink, LPNode, SerializableVariable +from hadar.optimizer.lp.domain import LPConsumption, LPProduction, LPLink, LPNode, SerializableVariable, LPStorage, \ + LPConverter, LPTimeStep, LPNetwork from hadar.optimizer.lp.mapper import InputMapper, OutputMapper -from hadar.optimizer.lp.optimizer import ObjectiveBuilder, AdequacyBuilder, _solve_batch +from hadar.optimizer.lp.optimizer import ObjectiveBuilder, AdequacyBuilder, _solve_batch, StorageBuilder, \ + ConverterMixBuilder from hadar.optimizer.lp.optimizer import solve_lp -from hadar.optimizer.output import OutputConsumption, OutputNode, Result +from hadar.optimizer.output import OutputConsumption, OutputNode, Result, OutputNetwork, OutputConverter from tests.optimizer.lp.ortools_mock import MockConstraint, MockNumVar, MockObjective, MockSolver @@ -27,11 +29,17 @@ def test_add_node(self): # Input consumptions = [LPConsumption(name='load', quantity=10, cost=10, variable=MockNumVar(0, 10, 'load'))] productions = [LPProduction(name='solar', quantity=10, cost=20, variable=MockNumVar(0, 20, 'solar'))] + storages = [LPStorage(name='cell', capacity=10, var_capacity=MockNumVar(0, 10, 'cell_capacity'), cost=1, + flow_in=1, 
var_flow_in=MockNumVar(0, 1, 'cell_flow_in'), + flow_out=10, var_flow_out=MockNumVar(0, 10, 'cell_flow_out'), + init_capacity=2, eff=1.2 + )] links = [LPLink(src='fr', dest='be', quantity=10, cost=30, variable=MockNumVar(0, 30, 'be'))] - node = LPNode(consumptions=consumptions, productions=productions, links=links) + node = LPNode(consumptions=consumptions, productions=productions, storages=storages, links=links) # Expected - coeffs = {MockNumVar(0, 10, 'load'): 10, MockNumVar(0, 20, 'solar'): 20, MockNumVar(0, 30, 'be'): 30} + coeffs = {MockNumVar(0, 10, 'load'): 10, MockNumVar(0, 20, 'solar'): 20, MockNumVar(0, 30, 'be'): 30, + MockNumVar(0, 10, 'cell_capacity'): 1} expected = MockObjective(min=True, coeffs=coeffs) # Test @@ -41,6 +49,25 @@ def test_add_node(self): self.assertEqual(expected, builder.objective) + def test_add_converter(self): + # Mock + solver = MockSolver() + + # Input + conv = LPConverter(name='conv', src_ratios={('gas', 'a'): 0.5}, dest_network='default', dest_node='b', + cost=10, max=100, var_flow_dest=MockNumVar(0, 100, 'flow_dest conv %s'), + var_flow_src={('gas', 'a'): MockNumVar(0, 200, 'flow_src conv gas:a %s')}) + + # Expected + expected = MockObjective(min=True, coeffs={MockNumVar(0, 100, 'flow_dest conv %s'): 10}) + + # Test + builder = ObjectiveBuilder(solver=solver) + builder.add_converter(conv) + builder.build() + + self.assertEqual(expected, builder.objective) + class TestAdequacyBuilder(unittest.TestCase): @@ -51,13 +78,19 @@ def test_add_node(self): # Input fr_consumptions = [LPConsumption(name='load', quantity=10, cost=10, variable=MockNumVar(0, 10, 'load'))] fr_productions = [LPProduction(name='solar', quantity=10, cost=20, variable=MockNumVar(0, 20, 'solar'))] + fr_storages = [LPStorage(name='cell', capacity=10, var_capacity=MockNumVar(0, 10, 'cell_capacity'), cost=1, + flow_in=1, var_flow_in=MockNumVar(0, 1, 'cell_flow_in'), + flow_out=10, var_flow_out=MockNumVar(0, 10, 'cell_flow_out'), + init_capacity=2, eff=1.2)] fr_links 
= [LPLink(src='fr', dest='be', quantity=10, cost=30, variable=MockNumVar(0, 30, 'be'))] - fr_node = LPNode(consumptions=fr_consumptions, productions=fr_productions, links=fr_links) + fr_node = LPNode(consumptions=fr_consumptions, productions=fr_productions, storages=fr_storages, links=fr_links) - be_node = LPNode(consumptions=[], productions=[], links=[]) + be_node = LPNode(consumptions=[], productions=[], storages=[], links=[]) # Expected - fr_coeffs = {MockNumVar(0, 10, 'load'): 1, MockNumVar(0, 20, 'solar'): 1, MockNumVar(0, 30, 'be'): -1} + fr_coeffs = {MockNumVar(0, 10, 'load'): 1, MockNumVar(0, 20, 'solar'): 1, + MockNumVar(0, 1, 'cell_flow_in'): -1, MockNumVar(0, 10, 'cell_flow_out'): 1, + MockNumVar(0, 30, 'be'): -1} fr_constraint = MockConstraint(10, 10, coeffs=fr_coeffs) be_coeffs = {MockNumVar(0, 30, 'be'): 1} @@ -65,19 +98,117 @@ def test_add_node(self): # Test builder = AdequacyBuilder(solver=solver) - builder.add_node(name='fr', node=fr_node, t=0) - builder.add_node(name='be', node=be_node, t=0) + builder.add_node(name_network='default', name_node='fr', node=fr_node, t=0) + builder.add_node(name_network='default', name_node='be', node=be_node, t=0) builder.build() - self.assertEqual(fr_constraint, builder.constraints[(0, 'fr')]) - self.assertEqual(be_constraint, builder.constraints[(0, 'be')]) + self.assertEqual(fr_constraint, builder.constraints[(0, 'default', 'fr')]) + self.assertEqual(be_constraint, builder.constraints[(0, 'default', 'be')]) + + def test_add_converter(self): + # Mock + solver = MockSolver() + + # Input + conv = LPConverter(name='conv', src_ratios={('gas', 'a'): 0.5}, dest_network='default', dest_node='b', + cost=10, max=100, var_flow_dest=MockNumVar(0, 100, 'flow_dest conv %s'), + var_flow_src={('gas', 'a'): MockNumVar(0, 200, 'flow_src conv gas:a %s')}) + + adequacy = AdequacyBuilder(solver=solver) + adequacy.constraints[(0, 'gas', 'a')] = MockConstraint(10, 10, coeffs={}) + adequacy.constraints[(0, 'default', 'b')] = 
MockConstraint(10, 10, coeffs={}) + + # Test + adequacy.add_converter(conv=conv, t=0) + + self.assertEqual({MockNumVar(0, 100, 'flow_dest conv %s'): 1}, adequacy.constraints[(0, 'default', 'b')].coeffs) + self.assertEqual({MockNumVar(0, 200, 'flow_src conv gas:a %s'): -1}, adequacy.constraints[(0, 'gas', 'a')].coeffs) + + +class TestStorageBuilder(unittest.TestCase): + def test_t0(self): + # Mock + solver = MockSolver() + + # Input + c0 = MockNumVar(0, 10, 'cell_capacity') + storages = [LPStorage(name='cell', capacity=10, var_capacity=c0, cost=1, + flow_in=1, var_flow_in=MockNumVar(0, 1, 'cell_flow_in'), + flow_out=10, var_flow_out=MockNumVar(0, 10, 'cell_flow_out'), + init_capacity=2, eff=1.2)] + node = LPNode(consumptions=[], productions=[], storages=storages, links=[]) + + # Expected + coeffs = {MockNumVar(0, 1, 'cell_flow_in'): -1.2, MockNumVar(0, 10, 'cell_flow_out'): 1, + c0: 1} + constraint = MockConstraint(2, 2, coeffs=coeffs) + + # Test + builder = StorageBuilder(solver=solver) + res = builder.add_node(name_network='default', name_node='fr', node=node, t=0) + + self.assertEqual(constraint, res) + self.assertEqual(builder.capacities[(0, 'default', 'fr', 'cell')], c0) + + def test(self): + # Mock + solver = MockSolver() + + # Input + storages = [LPStorage(name='cell', capacity=10, var_capacity=MockNumVar(0, 10, 'cell_capacity at 1'), cost=1, + flow_in=1, var_flow_in=MockNumVar(0, 1, 'cell_flow_in'), + flow_out=10, var_flow_out=MockNumVar(0, 10, 'cell_flow_out'), + init_capacity=2, eff=1.2)] + node = LPNode(consumptions=[], productions=[], storages=storages, links=[]) + + c0 = MockNumVar(0, 11, 'cell_capacity at 0') + c1 = MockNumVar(0, 10, 'cell_capacity at 1') + + # Expected + coeffs = {MockNumVar(0, 1, 'cell_flow_in'): -1.2, MockNumVar(0, 10, 'cell_flow_out'): 1, + c0: -1, c1: 1} + constraint = MockConstraint(0, 0, coeffs=coeffs) + + # Test + builder = StorageBuilder(solver=solver) + builder.capacities[(0, 'default', 'fr', 'cell')] = c0 + res = 
builder.add_node(name_network='default', name_node='fr', node=node, t=1) + + self.assertEqual(constraint, res) + self.assertEqual(c1, builder.capacities[(1, 'default', 'fr', 'cell')]) + + +class TestConverterMixBuilder(unittest.TestCase): + def test(self): + # Mock + solver = MockSolver() + + # Input + conv = LPConverter(name='conv', src_ratios={('gas', 'a'): 0.5}, dest_network='default', dest_node='b', + cost=10, max=100, var_flow_dest=MockNumVar(0, 100, 'flow_dest conv %s'), + var_flow_src={('gas', 'a'): MockNumVar(0, 200, 'flow_src conv gas:a %s')}) + + # Expected + expected = MockConstraint(0, 0, coeffs={MockNumVar(0, 100, 'flow_dest conv %s'): -1, + MockNumVar(0, 200, 'flow_src conv gas:a %s'): 0.5}) + + # Test + builder = ConverterMixBuilder(solver=solver) + res = builder.add_converter(conv) + self.assertEqual([expected], res) class TestSolve(unittest.TestCase): def test_solve_batch(self): # Input study = Study(horizon=1, nb_scn=1) \ - .network().node('a').consumption(name='load', cost=10, quantity=10).build() + .network()\ + .node('a')\ + .consumption(name='load', cost=10, quantity=10)\ + .to_converter(name='conv')\ + .network('gas').node('b')\ + .converter(name='conv', to_network='gas', to_node='b', max=10, cost=1)\ + .build() # Mock solver = MockSolver() @@ -85,54 +216,105 @@ def test_solve_batch(self): objective = ObjectiveBuilder(solver=solver) objective.add_node = MagicMock() + objective.add_converter = MagicMock() objective.build = MagicMock() adequacy = AdequacyBuilder(solver=solver) adequacy.add_node = MagicMock() + adequacy.add_converter = MagicMock() adequacy.build = MagicMock() + storage = StorageBuilder(solver=solver) + storage.add_node = MagicMock() + storage.build = MagicMock() + + mix = ConverterMixBuilder(solver=solver) + mix.add_converter = MagicMock() + mix.build = MagicMock() + in_cons = LPConsumption(name='load', quantity=10, cost=10, variable=MockNumVar(0, 10, 'load')) - var = LPNode(consumptions=[in_cons], productions=[], links=[]) + 
var_node = LPNode(consumptions=[in_cons], productions=[], storages=[], links=[]) + empty_node = LPNode(consumptions=[], productions=[], storages=[], links=[]) + var_conv = LPConverter(name='conv', src_ratios={('default', 'a'): .5}, + var_flow_src={('default', 'a'): MockNumVar(0, 10, 'conv src')}, + dest_network='gas', dest_node='b', max=10, cost=1, + var_flow_dest=MockNumVar(0, 10, 'conv dest')) + + def side_effect(network, node, t, scn): + return var_node if network == 'default' and node == 'a' else empty_node in_mapper = InputMapper(solver=solver, study=study) - in_mapper.get_var = MagicMock(return_value=var) + in_mapper.get_node_var = MagicMock(side_effect=side_effect) + + exp_var_conv = LPConverter(name='conv', src_ratios={('default', 'a'): .5}, max=10, cost=1, + var_flow_src={('default', 'a'): MockNumVar(0, 10, 'conv src')}, + dest_network='gas', dest_node='b', var_flow_dest=MockNumVar(0, 10, 'conv dest')) + in_mapper.get_conv_var = MagicMock(return_value=exp_var_conv) # Expected in_cons = LPConsumption(name='load', quantity=10, cost=10, variable=SerializableVariable(MockNumVar(0, 10, 'load'))) - exp_var = LPNode(consumptions=[in_cons], productions=[], links=[]) + exp_var_node = LPNode(consumptions=[in_cons], productions=[], storages=[], links=[]) + exp_var_conv = LPConverter(name='conv', src_ratios={('default', 'a'): .5}, + var_flow_src={('default', 'a'): SerializableVariable(MockNumVar(0, 10, 'conv src'))}, + dest_network='gas', dest_node='b', max=10, cost=1, + var_flow_dest=SerializableVariable(MockNumVar(0, 10, 'conv dest'))) + + expected = LPTimeStep(networks={'default': LPNetwork(nodes={'a': exp_var_node}), + 'gas': LPNetwork(nodes={'b': empty_node})}, + converters={'conv': exp_var_conv}) # Test - res = _solve_batch((study, 0, solver, objective, adequacy, in_mapper)) + res = _solve_batch((study, 0, solver, objective, adequacy, storage, mix, in_mapper)) + res = pickle.loads(res) + self.assertEqual([expected], res) + + 
in_mapper.get_node_var.assert_has_calls([call(network='default', node='a', t=0, scn=0), + call(network='gas', node='b', t=0, scn=0)]) + adequacy.add_node.assert_has_calls([call(name_network='default', name_node='a', t=0, node=var_node), + call(name_network='gas', name_node='b', t=0, node=empty_node)]) + storage.add_node.assert_has_calls([call(name_network='default', name_node='a', t=0, node=var_node), + call(name_network='gas', name_node='b', t=0, node=empty_node)]) + mix.add_converter.assert_called_with(conv=var_conv) - self.assertEqual([{'a': exp_var}], pickle.loads(res)) - in_mapper.get_var.assert_called_with(name='a', t=0, scn=0) - adequacy.add_node.assert_called_with(name='a', t=0, node=var) - objective.add_node.assert_called_with(node=var) + objective.add_node.assert_has_calls([call(node=var_node), call(node=empty_node)]) + objective.add_converter(conv=var_conv) objective.build.assert_called_with() adequacy.build.assert_called_with() + storage.build.assert_called_with() + mix.build.assert_called_with() solver.Solve.assert_called_with() def test_solve(self): # Input study = Study(horizon=1, nb_scn=1) \ - .network().node('a').consumption(name='load', cost=10, quantity=10).build() + .network('gas').node('a')\ + .consumption(name='load', cost=10, quantity=10)\ + .to_converter(name='conv', ratio=0.5)\ + .network().node('b')\ + .converter(name='conv', to_network='default', to_node='b', max=10, cost=1)\ + .build() # Expected - out_a = OutputNode(consumptions=[OutputConsumption(name='load', cost=10, quantity=[0])], - productions=[], links=[]) - exp_result = Result(nodes={'a': out_a}) - - in_cons = LPConsumption(name='load', quantity=10, cost=10, variable=SerializableVariable(MockNumVar(0, 10, ''))) - exp_var = LPNode(consumptions=[in_cons], productions=[], links=[]) + out_node = OutputNode(consumptions=[OutputConsumption(name='load', cost=10, quantity=[0])], + productions=[], storages=[], links=[]) + out_conv = OutputConverter(name='conv', flow_src={('gas', 'a'): 
[0]}, flow_dest=[0]) + exp_result = Result(networks={'gas': OutputNetwork(nodes={'a': out_node})}, + converters={'conv': out_conv}) # Mock + + out_mapper = OutputMapper(study=study) - out_mapper.set_var = MagicMock() + out_mapper.set_node_var = MagicMock() + out_mapper.set_converter_var = MagicMock() out_mapper.get_result = MagicMock(return_value=exp_result) # Test res = solve_lp(study, out_mapper) self.assertEqual(exp_result, res) - out_mapper.set_var.assert_called_with(name='a', t=0, scn=0, vars=exp_var) + out_mapper.set_node_var.assert_has_calls([call(network='gas', node='a', t=0, scn=0, vars=ANY), + call(network='default', node='b', t=0, scn=0, vars=ANY)]) + out_mapper.set_converter_var.assert_called_with(name='conv', t=0, scn=0, vars=ANY) + diff --git a/tests/optimizer/remote/test_optimizer.py b/tests/optimizer/remote/test_optimizer.py index d37279a..2d380b6 100644 --- a/tests/optimizer/remote/test_optimizer.py +++ b/tests/optimizer/remote/test_optimizer.py @@ -4,121 +4,71 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. 
- -import pickle +import json +import threading import unittest -from typing import Dict, List, Tuple -from unittest.mock import MagicMock +from http.server import BaseHTTPRequestHandler, HTTPServer from hadar import RemoteOptimizer -from hadar.optimizer.input import Study, Consumption -from hadar.optimizer.output import Result, OutputConsumption, OutputNode -from hadar.optimizer.remote.optimizer import _solve_remote_wrap, ServerError +from hadar.optimizer.input import Study +from hadar.optimizer.output import Result, OutputConsumption, OutputNode, OutputNetwork +from hadar.optimizer.remote.optimizer import check_code -class MockResponse: - def __init__(self, content, code=200): - self.content = pickle.dumps(content) - self.status_code = code +class MockSchedulerServer(BaseHTTPRequestHandler): + def do_POST(self): + assert self.path == '/api/v1/study?token=' + content_length = int(self.headers['Content-Length']) + data = json.loads(self.rfile.read(content_length).decode()) + assert isinstance(Study.from_json(data), Study) -class MockRequest: - def __init__(self, unit: unittest.TestCase, post: List[Dict], get: List[Dict]): - self.unit = unit - self._post = post - self._get = get + self.send_response(200) + body = json.dumps({'job': 123, 'status': 'QUEUED', 'progress': 1}).encode() + self.send_header('Content-Length', str(len(body))) + self.end_headers() + self.wfile.write(body) - @staticmethod - def cut_url(url): - return url[4:] # Remove 'host at the beginning + def do_GET(self): + assert '/api/v1/result/123?token=' == self.path - def get(self, url, params): - self.unit.assertEqual(self._get[0]['url'], MockRequest.cut_url(url)) - self.unit.assertEqual(self._get[0]['params'], params) - res = self._get[0]['res'] - del self._get[0] - return res + nodes = {'a': OutputNode(consumptions=[OutputConsumption(cost=0, quantity=[0], name='load')], + productions=[], storages=[], links=[])} + res = Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) - def 
post(self, url, params, data): - self.unit.assertEqual(self._post[0]['url'], MockRequest.cut_url(url)) - self.unit.assertEqual(self._post[0]['params'], params) - self.unit.assertEqual(pickle.dumps(self._post[0]['data']), data) - res = self._post[0]['res'] - del self._post[0] - return res + self.send_response(200) + body = json.dumps({'job': 123, 'status': 'TERMINATED', 'result': res.to_json()}).encode() + self.send_header('Content-Length', str(len(body))) + self.end_headers() + self.wfile.write(body) -class RemoteOptimizerTest(unittest.TestCase): +def handle_twice(handle_request): + handle_request() # one for Post /study + handle_request() # second for GET /result/123 + +class RemoteOptimizerTest(unittest.TestCase): def setUp(self) -> None: self.study = Study(horizon=1) \ .network().node('a').consumption(cost=0, quantity=[0], name='load').build() - self.result = Result(nodes={ - 'a': OutputNode(consumptions=[OutputConsumption(cost=0, quantity=[0], name='load')], - productions=[], links=[])}) + nodes = {'a': OutputNode(consumptions=[OutputConsumption(cost=0, quantity=[0], name='load')], + productions=[], storages=[], links=[])} + self.result = Result(networks={'default': OutputNetwork(nodes=nodes)}, converters={}) def test_job_terminated(self): - requests = MockRequest(unit=self, - post=[dict(url='/study', params={'token': 'pwd'}, data=self.study, - res=MockResponse({'job': 'myid', 'status': 'QUEUED', 'progress': 1})) - ], - get=[dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'QUEUED', 'progress': 1})), - dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'COMPUTING', 'progress': 0})), - dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'TERMINATED', 'result': 'myresult'})) - ]) - - res = _solve_remote_wrap(study=self.study, url='host', token='pwd', rqt=requests) - self.assertEqual('myresult', res) - - def test_job_error(self): - requests = MockRequest(unit=self, - 
post=[dict(url='/study', params={'token': 'pwd'}, data=self.study, - res=MockResponse({'job': 'myid', 'status': 'QUEUED', 'progress': 1})) - ], - get=[dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'QUEUED', 'progress': 1})), - dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'COMPUTING', 'progress': 0})), - dict(url='/result/myid', params={'token': 'pwd'}, - res=MockResponse({'status': 'ERROR', 'message': 'HUGE ERROR'})) - ]) - - self.assertRaises(ServerError, - lambda: _solve_remote_wrap(study=self.study, url='host', token='pwd', rqt=requests)) - - def test_404(self): - requests = MockRequest(unit=self, - post=[dict(url='/study', params={'token': 'pwd'}, data=self.study, - res=MockResponse(None, 404))], - get=[]) - requests.post = MagicMock(return_value=MockResponse(content=None, code=404)) - - self.assertRaises(ValueError, - lambda: _solve_remote_wrap(study=self.study, url='host', token='pwd', rqt=requests)) - - def test_403(self): - requests = MockRequest(unit=self, - post=[dict(url='/study', params={'token': 'pwd'}, data=self.study, - res=MockResponse(None, 403))], - get=[]) - - self.assertRaises(ValueError, - lambda: _solve_remote_wrap(study=self.study, url='host', token='pwd', rqt=requests)) - - def test_500(self): - requests = MockRequest(unit=self, - post=[dict(url='/study', params={'token': 'pwd'}, data=self.study, - res=MockResponse(None, 500))], - get=[]) - - self.assertRaises(IOError, - lambda: _solve_remote_wrap(study=self.study, url='host', token='pwd', rqt=requests)) - - def no_test_server(self): - optim = RemoteOptimizer(url='http://localhost:5000') + # Start server + httpd = HTTPServer(('localhost', 6964), MockSchedulerServer) + server = threading.Thread(None, handle_twice, None, (httpd.handle_request,)) + server.start() + + optim = RemoteOptimizer(url='http://localhost:6964') res = optim.solve(self.study) - print(res) \ No newline at end of file + + self.assertEqual(self.result, res) 
+ + def test_check_code(self): + self.assertRaises(ValueError, lambda: check_code(404)) + self.assertRaises(ValueError, lambda: check_code(403)) + self.assertRaises(IOError, lambda: check_code(500)) diff --git a/tests/optimizer/test_input.py b/tests/optimizer/test_input.py index f62b11c..b3a43d7 100644 --- a/tests/optimizer/test_input.py +++ b/tests/optimizer/test_input.py @@ -4,41 +4,55 @@ # If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. # SPDX-License-Identifier: Apache-2.0 # This file is part of hadar-simulator, a python adequacy library for everyone. - +import json import unittest import numpy as np -from hadar.optimizer.input import Study, Consumption, Production, Link +from hadar.optimizer.input import Study, Consumption, Production, Link, Storage, Converter +from utils import assert_result class TestStudy(unittest.TestCase): - def test_create_study(self): - c = Consumption(name='load', cost=20, quantity=10) - p = Production(name='nuclear', cost=20, quantity=10) - l = Link(dest='a', cost=20, quantity=10) - - study = Study(horizon=1) \ + def setUp(self) -> None: + self.study = Study(horizon=1) \ .network() \ .node('a') \ .consumption(name='load', cost=20, quantity=10) \ .production(name='nuclear', cost=20, quantity=10) \ + .to_converter(name='converter', ratio=1)\ .node('b') \ .link(src='b', dest='a', cost=20, quantity=10) \ + .network('gas')\ + .node('b')\ + .production(name='nuclear', cost=20, quantity=10)\ + .storage(name='store', capacity=100, flow_in=10, flow_out=10, cost=1, init_capacity=4, eff=0.1)\ + .node('a')\ + .consumption(name='load', cost=20, quantity=10)\ + .link(src='b', dest='a', cost=20, quantity=10) \ + .converter(name='converter', to_network='gas', to_node='b', cost=10, max=10) \ .build() - self.assertEqual(c, study.nodes['a'].consumptions[0]) - self.assertEqual(p, study.nodes['a'].productions[0]) - self.assertEqual(l, 
study.nodes['b'].links[0]) - self.assertEqual(1, study.horizon) + def test_create_study(self): + c = Consumption(name='load', cost=20, quantity=10) + p = Production(name='nuclear', cost=20, quantity=10) + s = Storage(name='store', capacity=100, flow_in=10, flow_out=10, cost=1, init_capacity=4, eff=0.1) + l = Link(dest='a', cost=20, quantity=10) + v = Converter(name='converter', src_ratios={('default', 'a'): 1}, dest_network='gas', + dest_node='b', cost=10, max=10) + + self.assertEqual(c, self.study.networks['default'].nodes['a'].consumptions[0]) + self.assertEqual(p, self.study.networks['default'].nodes['a'].productions[0]) + self.assertEqual(l, self.study.networks['default'].nodes['b'].links[0]) + self.assertEqual(c, self.study.networks['gas'].nodes['a'].consumptions[0]) + self.assertEqual(p, self.study.networks['gas'].nodes['b'].productions[0]) + self.assertEqual(s, self.study.networks['gas'].nodes['b'].storages[0]) + self.assertEqual(l, self.study.networks['gas'].nodes['b'].links[0]) - def test_wrong_production_cost(self): - def test(): - study = Study(horizon=1) \ - .network().node('fr').production(name='solar', cost=-1, quantity=10).build() + self.assertEqual(v, self.study.converters['converter']) - self.assertRaises(ValueError, test) + self.assertEqual(1, self.study.horizon) def test_wrong_production_quantity(self): def test(): @@ -58,13 +72,6 @@ def test(): self.assertRaises(ValueError, test) - def test_wrong_consumption_cost(self): - def test(): - study = Study(horizon=1) \ - .network().node('fr').consumption(name='load', cost=-1, quantity=10).build() - - self.assertRaises(ValueError, test) - def test_wrong_consumption_quantity(self): def test(): study = Study(horizon=1) \ @@ -81,16 +88,41 @@ def test(): .consumption(name='load', cost=1, quantity=-10)\ .build() + def test_wrong_storage_flow(self): + def test_in(): + study = Study(horizon=1)\ + .network().node('fr')\ + .storage(name='store', capacity=1, flow_in=-1, flow_out=1)\ + .build() - 
self.assertRaises(ValueError, test) + def test_out(): + study = Study(horizon=1)\ + .network().node('fr')\ + .storage(name='store', capacity=1, flow_in=1, flow_out=-1)\ + .build() + self.assertRaises(ValueError, test_in) + self.assertRaises(ValueError, test_out) + + def test_wrong_storage_capacity(self): + def test_capacity(): + study = Study(horizon=1)\ + .network().node('fr')\ + .storage(name='store', capacity=-1, flow_in=1, flow_out=1)\ + .build() + + def test_init_capacity(): + study = Study(horizon=1)\ + .network().node('fr')\ + .storage(name='store', capacity=1, flow_in=1, flow_out=1, init_capacity=-1)\ + .build() + self.assertRaises(ValueError, test_capacity) + self.assertRaises(ValueError, test_init_capacity) - def test_wrong_link_cost(self): + def test_wrong_storage_eff(self): def test(): - study = Study(horizon=1) \ - .network()\ - .node('fr')\ - .node('be')\ - .link(src='fr', dest='be', cost=-10, quantity=10)\ + study = Study(horizon=1)\ + .network().node('fr')\ + .storage(name='store', capacity=1, flow_in=1, flow_out=1, eff=-1)\ .build() self.assertRaises(ValueError, test) @@ -129,13 +161,43 @@ def test(): self.assertRaises(ValueError, test) + def test_wrong_converter_dest(self): + def test_network(): + study = Study(horizon=1)\ + .network('elec')\ + .node('a')\ + .converter(name='conv', to_network='gas', to_node='a', max=1)\ + .build() + + def test_node(): + study = Study(horizon=1)\ + .network('gas')\ + .node('a')\ + .converter(name='conv', to_network='gas', to_node='b', max=1)\ + .build() + + self.assertRaises(ValueError, test_network) + self.assertRaises(ValueError, test_node) + + def test_wrong_converter_src(self): + def test(): + study = Study(horizon=1)\ + .network()\ + .node('a')\ + .to_converter(name='conv', ratio=1)\ + .to_converter(name='conv', ratio=2)\ + .converter(name='conv', to_node='', to_network='', max=1)\ + .build() + + self.assertRaises(ValueError, test) + def test_validate_quantity_perfect_size(self): # Input study = 
Study(horizon=10, nb_scn=2).network().build() i = np.ones((2, 10)) # Test - r = study._validate_quantity(i) + r = study._standardize_array(i) np.testing.assert_array_equal(i, r) def test_validate_quantity_expend_scn(self): @@ -148,7 +210,7 @@ def test_validate_quantity_expend_scn(self): [1, 2, 3, 4, 5]]) # Test - res = study._validate_quantity(i) + res = study._standardize_array(i) np.testing.assert_array_equal(exp, res) def test_validate_quantity_expend_horizon(self): @@ -164,7 +226,7 @@ def test_validate_quantity_expend_horizon(self): [5, 5]]) # Test - res = study._validate_quantity(i) + res = study._standardize_array(i) np.testing.assert_array_equal(exp, res) def test_validate_quantity_expend_both(self): @@ -176,15 +238,17 @@ def test_validate_quantity_expend_both(self): exp = np.ones((3, 2)) # Test - res = study._validate_quantity(i) + res = study._standardize_array(i) np.testing.assert_array_equal(exp, res) def test_validate_quantity_wrong_size(self): # Input study = Study( horizon=2).network().build() - self.assertRaises(ValueError, lambda: study._validate_quantity([4, 5, 1])) - - def test_validate_quantity_negative(self): - # Input - study = Study(horizon=3).network().build() - self.assertRaises(ValueError, lambda: study._validate_quantity([4, -5, 1])) \ No newline at end of file + self.assertRaises(ValueError, lambda: study._standardize_array([4, 5, 1])) + + def test_serialization(self): + d = self.study.to_json() + j = json.dumps(d) + s = json.loads(j) + s = Study.from_json(s) + self.assertEqual(self.study, s) \ No newline at end of file diff --git a/tests/optimizer/test_output.py b/tests/optimizer/test_output.py new file mode 100644 index 0000000..fc860a6 --- /dev/null +++ b/tests/optimizer/test_output.py @@ -0,0 +1,24 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Apache License, version 2.0. 
+# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0. +# SPDX-License-Identifier: Apache-2.0 +# This file is part of hadar-simulator, a python adequacy library for everyone. +import json +import unittest + +from hadar.optimizer.output import * + + +class TestResult(unittest.TestCase): + def test_json(self): + result = Result(networks={'default': OutputNetwork(nodes={'a': OutputNode( + consumptions=[OutputConsumption(name='load', cost=[[1]], quantity=[[1]])], + productions=[OutputProduction(name='prod', cost=[[1]], quantity=[[1]])], + links=[OutputLink(dest='b', cost=[[1]], quantity=[[1]])], + storages=[OutputStorage(name='cell', capacity=[[1]], flow_in=[[1]], flow_out=[[1]])])})}, + converters={'cell': OutputConverter(name='conv', flow_src={('elec', 'b'): [[1]]}, flow_dest=[[1]])}) + + string = json.dumps(result.to_json()) + r = Result.from_json(json.loads(string)) + self.assertEqual(result, r) \ No newline at end of file diff --git a/tests/utils.py b/tests/utils.py index 468f109..7df5d81 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -10,49 +10,61 @@ from hadar.optimizer.output import Result -def assert_study(self, expected: Result, result: Result): - for name, node in expected.nodes.items(): - if name not in result.nodes.keys(): - self.fail('Node {} expected but not'.format(name)) - res = result.nodes[name] - - # Consumptions - for cons_expected, cons_res in zip(node.consumptions, res.consumptions): - self.assertEqual(cons_expected.name, cons_res.name, - "Consumption for node {} has different name".format(name)) - np.testing.assert_array_equal(cons_expected.quantity, cons_res.quantity, - 'Consumption {} for node {} has different quantity'.format(cons_expected.name, name)) - self.assertEqual(cons_expected.cost, cons_res.cost, - 'Consumption {} for node {} has different cost'.format(cons_expected.name, name)) - - # Productions - for prod_expected, prod_res in 
zip(node.productions, res.productions): - self.assertEqual(prod_expected.name, prod_res.name, - "Production for node {} has different name".format(name)) - np.testing.assert_array_equal(prod_expected.quantity, prod_res.quantity, - 'Production {} for node {} has different quantity'.format(prod_expected.name, name)) - self.assertEqual(prod_expected.cost, prod_res.cost, - 'Production {} for node {} has different cost'.format(prod_expected.name, name)) - - # Links - for link_expected, link_res in zip(node.links, res.links): - self.assertEqual(link_expected.dest, link_res.dest, - "Link for node {} has different name".format(name)) - np.testing.assert_array_equal(link_expected.quantity, link_res.quantity, - 'Link {} for node {} has different quantity'.format(link_expected.dest, name)) - self.assertEqual(link_expected.cost, link_res.cost, - 'Link {} for node {} has different cost'.format(link_expected.dest, name)) - - -def plot(d): - - print('=============================================================================================') - print("Node ", d.state.name, 'rac=', d.state.rac, 'cost=', d.state.cost) - print('\nEvents') - print('\tname\tmes') - for event in d.events: - print('\t{name: <8}{mes}'.format(name=event.name, mes=event.message)) - - print(d.state.consumptions) - print(d.state.productions) - print(d.state.exchanges) +def assert_result(self, expected: Result, result: Result): + for name_network, network in expected.networks.items(): + if name_network not in result.networks.keys(): + self.fail('Network {} expected but not'.format(name_network)) + + for name_node, node in network.nodes.items(): + if name_node not in result.networks[name_network].nodes.keys(): + self.fail('Node {} expected but not'.format(name_node)) + res = result.networks[name_network].nodes[name_node] + + # Consumptions + for cons_expected, cons_res in zip(node.consumptions, res.consumptions): + self.assertEqual(cons_expected.name, cons_res.name, + "Consumption for node {} has different 
name".format(name_node)) + np.testing.assert_array_equal(cons_expected.quantity, cons_res.quantity, + 'Consumption {} for node {} has different quantity'.format(cons_expected.name, name_node)) + np.testing.assert_array_equal(cons_expected.cost, cons_res.cost, + 'Consumption {} for node {} has different cost'.format(cons_expected.name, name_node)) + + # Productions + for prod_expected, prod_res in zip(node.productions, res.productions): + self.assertEqual(prod_expected.name, prod_res.name, + "Production for node {} has different name".format(name_node)) + np.testing.assert_array_equal(prod_expected.quantity, prod_res.quantity, + 'Production {} for node {} has different quantity'.format(prod_expected.name, name_node)) + np.testing.assert_array_equal(prod_expected.cost, prod_res.cost, + 'Production {} for node {} has different cost'.format(prod_expected.name, name_node)) + + # Storage + for stor_expected, stor_res in zip(node.storages, res.storages): + self.assertEqual(stor_expected.name, stor_res.name, + 'Storage for node {} has different name'.format(name_node)) + np.testing.assert_array_almost_equal(stor_expected.flow_in, stor_res.flow_in, 4, + 'Storage {} for node {} has different flow in'.format(stor_res.name, name_node)) + np.testing.assert_array_almost_equal(stor_expected.flow_out, stor_res.flow_out, 4, + 'Storage {} for node {} has different flow out'.format(stor_res.name, name_node)) + np.testing.assert_array_almost_equal(stor_expected.capacity, stor_res.capacity, 4, + 'Storage {} for node {} has different capacity'.format(stor_res.name, name_node)) + + # Links + for link_expected, link_res in zip(node.links, res.links): + self.assertEqual(link_expected.dest, link_res.dest, + "Link for node {} has different name".format(name_node)) + np.testing.assert_array_equal(link_expected.quantity, link_res.quantity, + 'Link {} for node {} has different quantity'.format(link_expected.dest, name_node)) + np.testing.assert_array_equal(link_expected.cost, link_res.cost, + 
'Link {} for node {} has different cost'.format(link_expected.dest, name_node)) + + # Converter + for name, exp in expected.converters.items(): + self.assertTrue(name in result.converters, 'Converter {} not in result'.format(name)) + for src, flow in exp.flow_src.items(): + self.assertTrue(src in result.converters[name].flow_src, 'Converter {} has not src {} in result'.format(name, src)) + np.testing.assert_array_equal(flow, result.converters[name].flow_src[src], + 'converter {} as different source {}'.format(name, src)) + + np.testing.assert_array_equal(exp.flow_dest, result.converters[name].flow_dest, + 'Converter {} has different flow dest'.format(name)) diff --git a/tests/viewer/test_html.py b/tests/viewer/test_html.py index 952ae60..92cc667 100644 --- a/tests/viewer/test_html.py +++ b/tests/viewer/test_html.py @@ -40,48 +40,110 @@ def setUp(self) -> None: self.hash = hashlib.sha3_256() - def test_stack(self): - fig = self.plot.network().node('a').stack(scn=0) - self.assert_fig_hash('d9f9f004b98ca62be934d69d4fd0c1a302512242', fig) - - def test_map_exchanges(self): + def test_network(self): fig = self.plot.network().map(t=0, scn=0, zoom=1.6) # Used this line to plot map: plot(fig) self.assert_fig_hash('49d81d1457b2ac78e1fc6ae4c1fc6215b8a0bbe4', fig) - def test_plot_timeline(self): - fig = self.plot.network().node('a').consumption('load').timeline() - self.assert_fig_hash('ba776202b252c9df5c81ca869b2e2d85e56e5589', fig) + fig = self.plot.network().rac_matrix() + self.assert_fig_hash('2b87a4e781e9eeb532f5d2b091c474bb0de625fd', fig) - fig = self.plot.network().node('b').production('nuclear').timeline() - self.assert_fig_hash('33baf5d01fda12b6a2d025abf8421905fc24abe1', fig) + def test_node(self): + fig = self.plot.network().node('a').stack(scn=0) + self.assert_fig_hash('d9f9f004b98ca62be934d69d4fd0c1a302512242', fig) - fig = self.plot.network().node('a').link('b').timeline() - self.assert_fig_hash('0c87d1283db5250858b14e2240d30f9059459e65', fig) + def 
test_consumption(self): + fig = self.plot.network().node('a').consumption('load').timeline() + self.assert_fig_hash('ba776202b252c9df5c81ca869b2e2d85e56e5589', fig) - def test_plot_monotone(self): fig = self.plot.network().node('a').consumption('load').monotone(scn=0) self.assert_fig_hash('1ffa51a52b066aab8cabb817c11fd1272549eb9d', fig) + fig = self.plot.network().node('a').consumption('load').gaussian(scn=0) + self.assert_fig_hash('4f3676a65cde6c268233679e1d0e6207df62764d', fig) + + def test_production(self): + fig = self.plot.network().node('b').production('nuclear').timeline() + self.assert_fig_hash('33baf5d01fda12b6a2d025abf8421905fc24abe1', fig) + fig = self.plot.network().node('b').production('nuclear').monotone(t=0) self.assert_fig_hash('e059878aac45330810578482df8c3d19261f7f75', fig) + fig = self.plot.network().node('b').production('nuclear').gaussian(t=0) + # Fail devops self.assert_fig_hash('45ffe15df1d72829ebe2283c9c4b65ee8465c978', fig) + + def test_link(self): + fig = self.plot.network().node('a').link('b').timeline() + self.assert_fig_hash('97f413ea2fa9908abebf381ec588a7e60b906884', fig) + fig = self.plot.network().node('a').link('b').monotone(scn=0) - self.assert_fig_hash('1d5dba9e2189c741e5daa36d69ff1a879f169964', fig) + self.assert_fig_hash('08b0e0d8414bee2c5083a298af00fe86d0eba6b0', fig) - def test_rac_heatmap(self): - fig = self.plot.network().rac_matrix() - self.assert_fig_hash('2b87a4e781e9eeb532f5d2b091c474bb0de625fd', fig) + fig = self.plot.network().node('a').link('b').gaussian(scn=0) + self.assert_fig_hash('5151ade23440beeea9ff144245f81b057c0fa2cd', fig) - def test_gaussian(self): - fig = self.plot.network().node('a').consumption('load').gaussian(scn=0) - self.assert_fig_hash('4f3676a65cde6c268233679e1d0e6207df62764d', fig) + def test_storage(self): + study = Study(horizon=4)\ + .network()\ + .node('a')\ + .production(name='nuclear', cost=20, quantity=[10, 10, 10, 0]) \ + .node('b')\ + .consumption(name='load', cost=10 ** 6, quantity=[20, 
10, 0, 10]) \ + .storage(name='cell', capacity=30, flow_in=10, flow_out=10, init_capacity=15, eff=.5) \ + .link(src='a', dest='b', cost=1, quantity=10)\ + .build() - fig = self.plot.network().node('b').production('nuclear').gaussian(t=0) - # Fail devops self.assert_fig_hash('45ffe15df1d72829ebe2283c9c4b65ee8465c978', fig) + optimizer = LPOptimizer() + res = optimizer.solve(study) + plot = HTMLPlotting(agg=ResultAnalyzer(study, res), unit_symbol='MW', time_start='2020-02-01', time_end='2020-02-02') - fig = self.plot.network().node('a').link('b').gaussian(scn=0) - self.assert_fig_hash('52620565ce8ea670b18707cccf30594b5c3d58ea', fig) + fig = plot.network().node('b').stack() + self.assert_fig_hash('94760e8b7d07704cfe4132a918b4075f5f594d69', fig) + + fig = plot.network().node('b').storage('cell').candles(scn=0) + self.assert_fig_hash('594ae603876c2d1bc91899e89d6de50bf37071ee', fig) + + fig = plot.network().node('b').storage('cell').monotone(scn=0) + self.assert_fig_hash('f020d7954b2fa2245001a4b34530d65ddbd87382', fig) + + def test_converter(self): + study = Study(horizon=2)\ + .network('elec')\ + .node('a')\ + .consumption(name='load', cost=10**6, quantity=[10, 30])\ + .network('gas')\ + .node('b')\ + .production(name='central', cost=10, quantity=50)\ + .to_converter(name='conv', ratio=0.8)\ + .network('coat')\ + .node('c')\ + .production(name='central', cost=10, quantity=60)\ + .to_converter(name='conv', ratio=0.5)\ + .converter(name='conv', to_network='elec', to_node='a', max=50)\ + .build() + + optim = LPOptimizer() + res = optim.solve(study) + plot = HTMLPlotting(agg=ResultAnalyzer(study, res), unit_symbol='MW', time_start='2020-02-01', + time_end='2020-02-02') + + fig = plot.network('elec').node('a').stack() + self.assert_fig_hash('0969b8b1bde6695a4c8cc78fdc5a42928f7af956', fig) + + fig = plot.network('gas').node('b').stack() + self.assert_fig_hash('d9a5c9f13c932048f1bcb22ec849a7a4e79b577b', fig) + + fig = 
plot.network('elec').node('a').from_converter('conv').timeline() + self.assert_fig_hash('5a42ce7a62c12c092631f0a9b63f807ada94ed79', fig) + + fig = plot.network('gas').node('b').to_converter('conv').timeline() + self.assert_fig_hash('77de14a806dff91a118d395b3e0d998335d64cd7', fig) + + fig = plot.network('gas').node('b').to_converter('conv').monotone(scn=0) + self.assert_fig_hash('3f6ac9f5e1c8ca611d39b7c62f527e4bfd5a573a', fig) + + fig = plot.network('elec').node('a').from_converter('conv').gaussian(scn=0) + self.assert_fig_hash('32a6e175600822c833a9b7f3008aa35230b0b646', fig) def assert_fig_hash(self, expected: str, fig: go.Figure): actual = hashlib.sha1(TestHTMLPlotting.get_html(fig)).hexdigest()