diff --git a/WCI_model.py b/WCI_model.py index da870fb..f801cb7 100644 --- a/WCI_model.py +++ b/WCI_model.py @@ -7,7 +7,7 @@ # # ## Developed by [Near Zero](http://nearzero.org) # -# ### Version 1.0 (Oct 10, 2018) +# ### Version 1.0.1 (Oct 15, 2018) # # This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec. # @@ -59,7 +59,7 @@ # from bokeh.models.tools import SaveTool from bokeh.models import Legend from bokeh.layouts import gridplot -from bokeh.palettes import viridis +from bokeh.palettes import Viridis, Blues, YlOrBr # note: Viridis is a dict; viridis is a function # # for html markup box # from bokeh.io import output_file, show @@ -104,6 +104,9 @@ class Prmt(): """ def __init__(self): + + self.model_version = '1.0.1' + self.online_settings_auction = True # will be overridden below for testing; normally set by user interface self.years_not_sold_out = () # set by user interface self.fract_not_sold = float(0) # set by user interface @@ -234,11 +237,11 @@ def load_input_files(): # main input_file try: prmt.input_file = pd.ExcelFile(prmt.input_file_raw_url_short) - logging.info("downloaded input file from short url") + # logging.info("downloaded input file from short url") # prmt.loading_msg_pre_refresh += ["Loading input file..."] # for UI except: prmt.input_file = pd.ExcelFile(prmt.blob_master + prmt.input_file_raw_url_short) - logging.info("downloaded input file from full url") + logging.info("downloaded input file using full url") # prmt.loading_msg_pre_refresh += ["Downloading input file..."] # for UI # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -2793,13 +2796,13 @@ def redesignate_unsold_advance_as_advance(all_accts, juris): sales_pct_adv_Q2 = df.at[f"{cq.date.year}Q2"] sales_pct_adv_Q3 = df.at[f"{cq.date.year}Q3"] -# # for db +# # for debugging # if use_fake_data == True: # # for 2017Q4, override actual value for adv sales in 2017Q2; set to 100% # # this 
allows redesignation of unsold from 2017Q1 in 2017Q4 # if cq.date == quarter_period('2017Q4'): # sales_pct_adv_Q2 = float(1) -# # end db +# # end debugging if sales_pct_adv_Q2 == float(1) and sales_pct_adv_Q3 == float(1): # 100% of auction sold; redesignate unsold from Q1, up to limit @@ -2914,6 +2917,9 @@ def process_auction_adv_all_accts(all_accts, juris): # iterate through all rows for available allowances; remove those sold # create df to collect sold quantities; initialize with zeros + # sort_index so that earliest vintages are drawn from first + adv_avail_1j_1q = adv_avail_1j_1q.sort_index() + adv_sold_1j_1q = adv_avail_1j_1q.copy() adv_sold_1j_1q['quant'] = float(0) @@ -3213,6 +3219,9 @@ def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q): # initialize df to collect all reintro reintro_1j_1q = prmt.standard_MI_empty.copy() + # sort_index to ensure that earliest vintages are drawn from first + reintro_eligible_1j = reintro_eligible_1j.sort_index() + for row in reintro_eligible_1j.index: if max_cur_reintro_1j_1q_remaining == 0: break @@ -3223,7 +3232,7 @@ def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q): # update accumulator for amount reintro so far in present quarter (may be more than one batch) reintro_1q_quant += reintro_one_batch_quantity - + # update un-accumulator for max_cur_reintro_1j_1q_remaining (may be more than one batch) max_cur_reintro_1j_1q_remaining += -1*reintro_one_batch_quantity @@ -3456,6 +3465,9 @@ def process_auction_cur_CA_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + reintro_avail_1q = reintro_avail_1q.sort_index() + reintro_sold_1q = reintro_avail_1q.copy() reintro_sold_1q['quant'] = float(0) @@ -3523,6 +3535,9 @@ def process_auction_cur_CA_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # 
sort_index to ensure that earliest vintages are drawn from first + new_avail_1q = new_avail_1q.sort_index() + new_sold_1q = new_avail_1q.copy() new_sold_1q['quant'] = float(0) @@ -4967,6 +4982,9 @@ def transfer_QC_alloc_trueups__from_alloc_hold(all_accts): # create df of those transferred: # copy whole df trueup_potential, zero out values, then set new values in loop + # sort_index to ensure that earliest vintages are drawn from first + trueup_potential = trueup_potential.sort_index() + trueup_transfers = trueup_potential.copy() trueup_transfers['quant'] = float(0) # note: trueup_transfers winds up with zero rows because it is not built up from appending rows @@ -5621,9 +5639,12 @@ def retire_for_EIM_outstanding(all_accts): # get quantity to be retired in cq.date.year; # initialization of variable that will be updated EIM_retirements = assign_EIM_retirements() + EIM_remaining = EIM_retirements.at[cq.date.year] # create df for adding transfers; copy of retire_potential, but with values zeroed out + # sort_index to ensure earliest vintages are drawn from first + retire_potential = retire_potential.sort_index() to_retire = retire_potential.copy() to_retire['quant'] = float(0) @@ -5646,7 +5667,7 @@ def retire_for_EIM_outstanding(all_accts): 'inst_cat': 'EIM_retire', 'date_level': cq.date} to_retire = multiindex_change(to_retire, mapping_dict) - + # concat to_retire with all_accts remainder all_accts = pd.concat([all_accts.loc[~mask], retire_potential, to_retire], sort=True) @@ -5906,6 +5927,9 @@ def process_auction_cur_QC_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + reintro_avail_1q = reintro_avail_1q.sort_index() + reintro_sold_1q = reintro_avail_1q.copy() reintro_sold_1q['quant'] = float(0) @@ -5972,6 +5996,9 @@ def process_auction_cur_QC_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from 
avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + new_avail_1q = new_avail_1q.sort_index() + new_sold_1q = new_avail_1q.copy() new_sold_1q['quant'] = float(0) @@ -6285,17 +6312,22 @@ def create_progress_bar(wid): def assign_EIM_retirements(): - logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)") + """ + Assign quantities for EIM Outstanding Emissions retirements in 2018, 2019, and 2020. + + These are for EIM Outstanding Emissions incurred in 2017, 2018, and 2019Q1. + + As of Oct. 2018, there was no clear data on quantities to be retired for EIM Outstanding Emissions. + + Therefore values here are set to zero until more information is available. - # *possible* assumption that would be consistent with what ARB said during informal process: - # assume 5 MMTCO2e retired for EIM incurred in 2017 (processed in 2018) - # assume 5 MMTCO2e retired for EIM incurred in 2018 (processed in 2019) - # assume 5/4 MMTCO2e retired for EIM incurred in 2019Q1 (processed in 2020) - # (units MMTCO2e) + """ + logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)") EIM_retirements_dict = {2018: 0, 2019: 0, - 2020: 0} + 2020: 0 / 4} + EIM_retirements = pd.Series(EIM_retirements_dict) EIM_retirements.name = 'EIM_retirements' EIM_retirements.index.name = 'year processed' @@ -6305,6 +6337,14 @@ def assign_EIM_retirements(): # ~~~~~~~~~~~~~~~~~~ def assign_bankruptcy_retirements(): + """ + Handling of bankruptcy retirements based on "2018 Regulation Documents (Narrow Scope)": + https://www.arb.ca.gov/regact/2018/capandtradeghg18/capandtradeghg18.htm + + Quantity for 2019 based on ARB statement in ARB, "Supporting Material for Assessment of Post-2020 Caps" (Apr 2018): + https://www.arb.ca.gov/cc/capandtrade/meetings/20180426/carb_post2020caps.pdf + "Approximately 5 million allowances to be retired in response to a recent bankruptcy" + """ logging.info(f"initialization: 
{inspect.currentframe().f_code.co_name} (start)") # bankruptcy retirements (units MMTCO2e) @@ -7679,7 +7719,6 @@ def create_figures(): border_line_color=None) p1.add_layout(legend, 'below') - # p1.add_tools(SaveTool()) em_CAQC_fig = p1 @@ -7689,7 +7728,11 @@ def create_figures(): # set y_max using balance_source, where balance is bank + unsold y_max = (int(prmt.balance.max() / 100) + 1) * 100 - y_min = (int(prmt.reserve_sales.min() / 100) - 1) * 100 + if prmt.reserve_sales.min() == 0: + y_min = 0 + else: + # then abs(prmt.reserve_sales.min()) > 0 + y_min = (int(prmt.reserve_sales.min() / 100) - 1) * 100 p2 = figure(title='private bank and unsold allowances (cumulative)', height = 600, width = 700, @@ -7699,10 +7742,13 @@ def create_figures(): # toolbar_sticky=False, ) - p2.yaxis.axis_label = "MMTCO2e" + p2.xaxis.axis_label = "at end of each year" p2.xaxis.major_label_standoff = 10 p2.xaxis.minor_tick_line_color = None + + p2.yaxis.axis_label = "MMTCO2e" p2.yaxis.minor_tick_line_color = None + p2.outline_line_color = "white" # p2.min_border_top = 10 p2.min_border_right = 15 @@ -7711,26 +7757,26 @@ def create_figures(): unsold_vbar = p2.vbar(prmt.balance.index, top=prmt.balance, width=1, - color=viridis(6)[4], # 'limegreen', - line_width=1, line_color='black') + color=Viridis[6][4], + line_width=1, line_color='dimgray') bank_vbar = p2.vbar(prmt.bank_cumul_pos.index, top=prmt.bank_cumul_pos, width=1, - color=viridis(6)[3], # 'seagreen', - line_width=1, line_color='black') + color=Viridis[6][3], + line_width=0.5, line_color='dimgray') reserve_vbar = p2.vbar(prmt.reserve_sales.index, top=prmt.reserve_sales, width=1, color='tomato', - line_width=1, line_color='black') + line_width=0.5, line_color='dimgray') # add vertical line for divider between full historical data vs. 
projection (partial or full) p2.line([emissions_last_hist_yr+0.5, emissions_last_hist_yr+0.5], [y_min, y_max], line_color='black', - # line_width=2, + line_width=1, line_dash='dashed') legend = Legend(items=[('private bank', [bank_vbar]), @@ -7742,7 +7788,6 @@ def create_figures(): border_line_color=None) p2.add_layout(legend, 'below') - # p2.add_tools(SaveTool()) bank_CAQC_fig_bar = p2 @@ -8110,8 +8155,8 @@ def create_offsets_tabs(): def create_export_df(): # metadata for figure_for_export - metadata_list = [] # initialize - descrip_list = [] # initialize + descrip_list = [f'WCI cap-and-trade model version {prmt.model_version}'] # initialize with model version number + metadata_list = [f'https://github.com/nearzero/WCI-cap-and-trade/tree/v{prmt.model_version}'] # initialize with model version number metadata_list_of_tuples = [] # initialize if emissions_tabs.selected_index == 0: @@ -8421,7 +8466,7 @@ def save_csv_on_click(b): display(Javascript(prmt.js_download_of_csv)) # end of save_csv_on_click - + # ~~~~~~~~~~~~~~ save_csv_button.on_click(save_csv_on_click) @@ -8446,7 +8491,47 @@ def save_csv_on_click(b): display(offsets_tabs_explainer_title) -# #### export snaps_end +# #### export snaps_end_all + +# In[ ]: + + +# if __name__ == '__main__': +# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime()) + +# if prmt.run_hindcast == True: + +# # collect the snaps +# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False) +# snaps_end_all_CA_QC = df + +# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0: +# # export as "all sell out (hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC all sell out (hindcast) {save_timestamp}.csv") + +# else: +# # export as "some unsold (hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC some unsold (hindcast) {save_timestamp}.csv") + +# else: # prmt.run_hindcast == False +# try: +# # collect the snaps, select only Q4 +# df = 
pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False) +# snaps_end_all_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy() + +# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0: +# # export as "all sell out (not hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC all sell out (not hindcast) {save_timestamp}.csv") +# else: +# # export as "some unsold (not hindcast) +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC some unsold (not hindcast) {save_timestamp}.csv") +# except: +# # no results; initial run using defaults, so snaps are empty +# # export would just be the same as prmt.snaps_end_Q4 +# pass + + +# #### export snaps_end_Q4 # In[ ]: @@ -8486,4 +8571,15 @@ def save_csv_on_click(b): # pass +# In[ ]: + + +# if __name__ == '__main__': +# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime()) + +# avail_accum_all = pd.concat([scenario_CA.avail_accum, scenario_QC.avail_accum], axis=0, sort=False) + +# avail_accum_all.to_csv(os.getcwd() + '/' + f"avail_accum_all all sell out {save_timestamp}.csv") + + # # END OF MODEL diff --git a/WCI_model_interface.ipynb b/WCI_model_interface.ipynb index a984f33..43b1335 100644 --- a/WCI_model_interface.ipynb +++ b/WCI_model_interface.ipynb @@ -3,7 +3,12 @@ { "cell_type": "markdown", "metadata": { - "hide_input": false + "deletable": false, + "editable": false, + "hide_input": false, + "run_control": { + "frozen": true + } }, "source": [ "\"Drawing\"\n", @@ -12,7 +17,7 @@ "\n", "## Developed by [Near Zero](http://nearzero.org)\n", "\n", - "### Version 1.0\n", + "### Version 1.0.1\n", "\n", "This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec.\n", "\n", diff --git a/WCI_model_notebook.ipynb b/WCI_model_notebook.ipynb new file mode 100644 index 0000000..1a7c464 --- /dev/null +++ b/WCI_model_notebook.ipynb @@ -0,0 +1,9264 @@ +{ + "cells": [ + { 
+ "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Drawing\"\n", + "\n", + "# Western Climate Initiative cap-and-trade model\n", + "\n", + "## Developed by [Near Zero](http://nearzero.org)\n", + "\n", + "### Version 1.0.1 (Oct 15, 2018)\n", + "\n", + "This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec.\n", + "\n", + "---\n", + "\n", + "© Copyright 2018 by [Near Zero](http://nearzero.org). This work is licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).\n", + "\n", + "Mason Inman (minman@nearzero.org) is the project manager and technical lead for the development of this model.\n", + "\n", + "The model is open source, released under the Creative Commons license above, and is written in Python, including use of the library [Pandas](https://pandas.pydata.org/). The online user interface is built using [Jupyter](https://jupyter.org/), with figures using [Bokeh](http://bokeh.pydata.org/), and hosted online through [Binder](https://mybinder.org/).\n", + "\n", + "View the [model code](https://github.com/nearzero/WCI-cap-and-trade) on Github, and download the [model documentation](https://github.com/nearzero/WCI-cap-and-trade/blob/master/documentation.docx?raw=true).\n", + "\n", + "Near Zero gratefully acknowledges support for this work from the Energy Foundation, grant number G-1804-27647. Near Zero is solely responsible for the content. The model, its results, and its documentation are for informational purposes only and do not constitute investment advice.\n", + "\n", + "**About Near Zero**: Near Zero is a non-profit environmental research organization based at the Carnegie Institution for Science on the Stanford University campus. Near Zero provides credible, impartial, and actionable assessment with the goal of cutting greenhouse gas emissions to near zero." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# IMPORT LIBRARIES" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "from pandas.tseries.offsets import *\n", + "import numpy as np\n", + "\n", + "import ipywidgets as widgets\n", + "from IPython.core.display import display # display used for widgets and for hiding code cells\n", + "from IPython.display import clear_output, Javascript # Javascript is for csv save\n", + "\n", + "import time\n", + "# from time import sleep\n", + "import datetime as dt\n", + "from datetime import datetime\n", + "\n", + "import os\n", + "import inspect # for getting name of current function\n", + "import logging\n", + "\n", + "# pd.__version__, np.__version__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import bokeh\n", + "\n", + "from bokeh.plotting import figure, show, output_notebook # save\n", + "# from bokeh.models.tools import SaveTool\n", + "from bokeh.models import Legend\n", + "from bokeh.layouts import gridplot\n", + "from bokeh.palettes import Viridis, Blues, YlOrBr # note: Viridis is a dict; viridis is a function\n", + "\n", + "# # for html markup box\n", + "# from bokeh.io import output_file, show\n", + "\n", + "# use if working offline; also might help with Binder loading\n", + "from bokeh.resources import INLINE\n", + "\n", + "output_notebook(resources=INLINE, hide_banner=True)\n", + "# hide_banner gets rid of message \"BokehJS ... 
successfully loaded\"\n", + "\n", + "from bokeh.document import Document\n", + "from bokeh.models.layouts import Column" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize logging\n", + "save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n", + "\n", + "# start logging\n", + "# to save logs, need to update below with the correct strings and selection for your desired directory\n", + "try:\n", + " if os.getcwd().split('/')[4] == 'cap_and_trade_active_dev':\n", + " LOG_PATH = os.getcwd() + '/logs'\n", + " logging.basicConfig(filename=f\"{LOG_PATH}/WCI_cap_trade_log_{save_timestamp}.txt\", \n", + " filemode='a', # choices: 'w' or 'a'\n", + " level=logging.INFO)\n", + " else:\n", + " # don't save log\n", + " pass\n", + "except:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class Prmt():\n", + " \"\"\"\n", + " Class to create object prmt that has parameters used throughout the model as its attributes.\n", + " \"\"\"\n", + " \n", + " def __init__(self):\n", + " \n", + " self.model_version = '1.0.1'\n", + " \n", + " self.online_settings_auction = True # will be overridden below for testing; normally set by user interface\n", + " self.years_not_sold_out = () # set by user interface\n", + " self.fract_not_sold = float(0) # set by user interface\n", + " \n", + " self.run_hindcast = False # set to true to start model run at beginning of each market (2012Q4/2013Q4)\n", + " \n", + " self.run_tests = True\n", + " self.verbose_log = True\n", + " self.test_failed_msg = 'Test failed!:' \n", + " \n", + " self.CA_post_2020_regs = 'Proposed_Regs_Sep_2018'\n", + " self.QC_post_2020_regs = 'Proposed_Regs_Sep_2018'\n", + " # regs choices are: 'Regs_Oct_2017', 'Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018'\n", + " \n", + " self.neg_cut_off = 10/1e6 # units MMTCO2e; enter number of allowances (tons CO2e) in 
numerator\n", + " # doesn't matter whether negative or positive entered here; used with -abs(neg_cut_off)\n", + " self.show_neg_msg = False # if False, fn test_for_negative_values won't print any messages \n", + " \n", + " self.CA_start_date = pd.to_datetime('2012Q4').to_period('Q') # default\n", + " self.QC_start_date = pd.to_datetime('2013Q4').to_period('Q') # default\n", + " self.CA_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " self.QC_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " self.model_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " \n", + " # generate list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " self.CA_quarters = pd.date_range(start=self.CA_start_date.to_timestamp(),\n", + " end=self.CA_end_date.to_timestamp() + DateOffset(months=3), \n", + " freq='Q').to_period('Q')\n", + "\n", + " # generate list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " self.QC_quarters = pd.date_range(start=self.QC_start_date.to_timestamp(),\n", + " end=self.QC_end_date.to_timestamp() + DateOffset(months=3), \n", + " freq='Q').to_period('Q')\n", + " \n", + " self.blob_master = \"https://github.com/nearzero/WCI-cap-and-trade/blob/master\"\n", + " self.input_file_raw_url_short = \"/data/data_input_file.xlsx?raw=true\"\n", + " self.CIR_raw_url_short = \"/data/CIR_file.xlsx?raw=true\"\n", + " \n", + " self.snaps_end_Q4 = '' # value filled in by fn download_input_files\n", + " self.snaps_end_Q4_sum = '' # value filled in by fn download_input_files\n", + " \n", + " self.CA_cap_adjustment_factor = '' # value filled in by fn download_input_files\n", + " \n", + " self.NaT_proxy = pd.to_datetime('2200Q1').to_period('Q')\n", + " \n", + " self.standard_MI_names = ['acct_name', 'juris', 
'auct_type', 'inst_cat', 'vintage', 'newness', 'status', \n", + " 'date_level', 'unsold_di', 'unsold_dl', 'units']\n", + " \n", + " # create empty index; can be used for initializing all dfs\n", + " self.standard_MI_index = pd.MultiIndex(levels=[[]]*len(self.standard_MI_names),\n", + " labels=[[]]*len(self.standard_MI_names),\n", + " names=self.standard_MI_names)\n", + " \n", + " self.standard_MI_empty = pd.DataFrame(index=self.standard_MI_index, columns=['quant'])\n", + " \n", + " self.CIR_columns = ['gen_comp', 'limited_use', 'VRE_acct', 'A_I_A', 'retirement', 'APCR_acct', \n", + " 'env_integrity', 'early_action', 'subtotal']\n", + " \n", + " self.progress_bar_CA_count = 0 # initialize\n", + " self.progress_bar_QC_count = 0 # initialize\n", + " \n", + " self.offset_rate_fract_of_limit = 0.75 # see func offsets_projection for rationale\n", + " \n", + " self.use_fake_data = False # used for testing \n", + " \n", + " # ~~~~~~~~~~~~~~~~~ \n", + " \n", + " # set other variables to be blank; will be reset below using functions \n", + " self.qauct_hist = ''\n", + " self.auction_sales_pcts_all = '' \n", + " self.CA_cap = ''\n", + " self.CA_APCR_MI = ''\n", + " self.CA_advance_MI = ''\n", + " self.VRE_reserve_MI = ''\n", + "\n", + " self.CA_alloc_MI_all = ''\n", + " self.consign_hist_proj = ''\n", + " \n", + " self.QC_cap = ''\n", + " self.QC_advance_MI = ''\n", + " self.QC_APCR_MI = ''\n", + " self.QC_alloc_initial = ''\n", + " self.QC_alloc_trueups = ''\n", + " self.QC_alloc_full_proj = ''\n", + " \n", + " self.qauct_hist = ''\n", + " self.auction_sales_pcts_all = ''\n", + " self.qauct_new_avail = ''\n", + "\n", + " self.compliance_events = ''\n", + " self.VRE_retired = ''\n", + " self.CIR_historical = ''\n", + " self.CIR_offsets_q_sums = ''\n", + " \n", + " self.loading_msg_pre_refresh = []\n", + " self.error_msg_post_refresh = []\n", + " \n", + " self.input_file = ''\n", + " self.CIR_excel = ''\n", + " self.CA_cap_data = ''\n", + " \n", + " self.emissions_ann = ''\n", 
+ " self.emissions_ann_CA = ''\n", + " self.emissions_ann_QC = ''\n", + " self.supply_ann = ''\n", + " self.bank_cumul_pos = ''\n", + " self.balance = ''\n", + " self.unsold_auct_hold_cur_sum = ''\n", + " self.reserve_sales = ''\n", + " \n", + " self.Fig_1_2 = ''\n", + " self.js_download_of_csv = ''\n", + " self.export_df = ''\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "# create object prmt (instance of class Prmt), after which it can be filled with more entries below\n", + "prmt = Prmt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def load_input_files():\n", + " # download each file once from Github, set each as an attribute of object prmt\n", + "\n", + " # main input_file\n", + " try:\n", + " prmt.input_file = pd.ExcelFile(prmt.input_file_raw_url_short)\n", + " # logging.info(\"downloaded input file from short url\")\n", + " # prmt.loading_msg_pre_refresh += [\"Loading input file...\"] # for UI\n", + " except:\n", + " prmt.input_file = pd.ExcelFile(prmt.blob_master + prmt.input_file_raw_url_short)\n", + " logging.info(\"downloaded input file using full url\")\n", + " # prmt.loading_msg_pre_refresh += [\"Downloading input file...\"] # for UI\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CIR quarterly\n", + " try:\n", + " prmt.CIR_excel = pd.ExcelFile(prmt.CIR_raw_url_short)\n", + " logging.info(\"downloaded CIR file from short url\")\n", + " except:\n", + " prmt.CIR_excel = pd.ExcelFile(prmt.blob_master + prmt.CIR_raw_url_short)\n", + " logging.info(\"downloaded CIR file from full url\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def snaps_end_Q4_all_sell_initialize():\n", + " \"\"\"\n", + " Modifies format of object attribute prmt.snaps_end_Q4:\n", + " * formats date columns to be in date format\n", + " * sets MultiIndex = prmt.standard_MI_names, leaves 'snap_q' as column\n", + " \n", + " 
\"\"\"\n", + " \n", + " # read snaps_end_Q4 from input_file sheet, set new value for object attribute\n", + " prmt.snaps_end_Q4 = pd.read_excel(prmt.input_file, sheet_name='snaps end Q4 all sell out')\n", + " \n", + " # format columns as Period (quarters)\n", + " for col in ['snap_q', 'date_level', 'unsold_di', 'unsold_dl']:\n", + " if isinstance(col, pd.Period):\n", + " pass\n", + " else:\n", + " prmt.snaps_end_Q4[col] = pd.to_datetime(prmt.snaps_end_Q4[col]).dt.to_period('Q')\n", + " \n", + " # restore np.NaN (replacing the way Excel saves them)\n", + " for col in ['auct_type', 'inst_cat', 'newness', 'status']:\n", + " prmt.snaps_end_Q4[col] = prmt.snaps_end_Q4[col].replace(np.NaN, 'n/a')\n", + " \n", + " # set MultiIndex as standard_MI_names; snap_q will remain as column next to 'quant'\n", + " prmt.snaps_end_Q4 = prmt.snaps_end_Q4.set_index(prmt.standard_MI_names)\n", + " \n", + " # calculate sum (for testing); set as new value of object attribute\n", + " prmt.snaps_end_Q4_sum = prmt.snaps_end_Q4['quant'].sum()\n", + " \n", + " # no return; modifies object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run function to download files\n", + "load_input_files()\n", + "\n", + "# get snaps_end_Q4\n", + "snaps_end_Q4_all_sell_initialize()\n", + "\n", + "# set CA_cap_data\n", + "prmt.CA_cap_data = pd.read_excel(prmt.input_file, sheet_name='CA cap data')\n", + "logging.info(\"read CA_cap_data\")\n", + "\n", + "# set CA_cap_adjustment_factor\n", + "prmt.CA_cap_adjustment_factor = prmt.CA_cap_data[\n", + " prmt.CA_cap_data['name']=='CA_cap_adjustment_factor'].set_index('year')['data']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# from WCI_model_explainer_text_v2.py (2018-10-10)\n", + "\n", + "figure_explainer_text = \"

Below, the figure on the left shows covered emissions compared with the supply of compliance instruments (allowances and offsets) that enter private market participants’ accounts through auction sales or direct allocations from WCI governments.


The model tracks the private bank of allowances, defined as the number of allowances held in private accounts in excess of compliance obligations those entities face under the program in any given year. When the supply of compliance instruments entering private accounts is greater than covered emissions in a given year, the private bank increases. When the supply of compliance instruments entering private accounts is less than covered emissions, the private bank decreases.


The figure on the right shows the running total of compliance instruments banked in private accounts. In addition, the graph shows any allowances that went unsold in auctions. These allowances are held in government accounts until they are either reintroduced at a later auction or removed from the normal auction supply subject to market rules.


If the private bank is exhausted, the model simulates reserve sales to meet any remaining outstanding compliance obligations, based on the user-defined emissions projection. Starting in 2021, if the supply of allowances held in government-controlled reserve accounts is exhausted, then an unlimited quantity of instruments called “price ceiling units” will be available at a price ceiling to meet any remaining compliance obligations. The model tracks the sale of reserve allowances and price ceiling units in a single composite category.


For more information about the banking metric used here, see Near Zero's Sep. 2018 report, Tracking Banking in the Western Climate Initiative Cap-and-Trade Program.

\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "em_explainer_text = \"

The WCI cap-and-trade program covers emissions from electricity suppliers, large industrial facilities, and natural gas and transportation fuel distributors.


By default, the model uses a projection in which covered emissions decrease 2% per year, starting from emissions in 2016 (the latest year with official reporting data). Users can specify higher or lower emissions scenarios using the available settings.


A 2% rate of decline follows ARB's 2017 Scoping Plan scenario for California emissions, which includes the effects of prescriptive policy measures (e.g., the Renewables Portfolio Standard for electricity), but does not incorporate effects of the cap-and-trade program.


Note that PATHWAYS, the model ARB used to generate the Scoping Plan scenario, does not directly project covered emissions in California. Instead, the PATHWAYS model tracks emissions from four economic sectors called “covered sectors,” which together constitute about ~10% more emissions than the “covered emissions” that are actually subject to the cap-and-trade program in California. For more information, see Near Zero's May 2018 report on this discrepancy. Users can define their own emission projections to explore any scenario they like, as the model makes no assumptions about future emissions aside from what the user provides.

\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "# note: no
between

and

for this text block\n", + "em_custom_footnote_text = \"

Copy and paste from data table in Excel.

Format: column for years on left, column for emissions data on right. Please copy only the data, without headers (see example).

Projection must cover each year from 2017 to 2030. (Data entered for years prior to 2017 and after 2030 will be discarded.)

Units must be million metric tons CO2e/year (MMTCO2e).

\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "auction_explainer_text = \"

WCI quarterly auctions include two separate offerings: a current auction of allowances with vintage years equal to the current calendar year (as well as any earlier vintages of allowances that went unsold and are being reintroduced), and a separate advance auction featuring a limited number of allowances with a vintage year equal to three years in the future.


By default, the model assumes that all future auctions sell out. However, users can specify a custom percentage of allowances to go unsold at auction in one or more years. This percentage applies to both current and advance auctions, in each quarter of the user-specified years.


To date, most current auctions have sold out. But in 2016 and 2017, 143 million current allowances went unsold as sales collapsed over several auctions. Pursuant to market rules, most of these allowances are now being reintroduced for sale in current auctions.


If California state-owned allowances remain unsold for more than 24 months, they are removed from the normal auction supply and transferred to the market reserve accounts. Quebec's current regulations do not contain a similar stipulation. We calculate that this self-correction mechanism will remove 38 – 52 million previously unsold allowances from the normal auction supply, with the exact amount dependent on the outcomes of the next two quarterly auctions. The remaining 91 – 105 million allowance will have been reintroduced at auction.


For more information, see Near Zero's May 2018 report on this self-correction mechanism.

\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "offsets_explainer_text = \"

In addition to submitting allowances to satisfy their compliance obligations, entities subject to the cap-and-trade program can also submit a certain number of offset credits instead. These credits represent emission reductions that take place outside of the cap-and-trade program and are credited pursuant to an approved offset protocol.


For California, the limits on offset usage are equal to a percentage of a covered entity’s compliance obligations: through 2020, the limit is 8%; from 2021 through 2025, the limit is 4%; and from 2026 through 2030, the limit is 6%. For Quebec, the limit is 8% for all years.


The model incorporates actual offset supply through Q3 2018, based on ARB’s Q3 2018 compliance instrument report for the WCI system. By default, the model assumes offset supply in any year is equivalent to three-quarters of the limit in each jurisdiction, reflecting ARB’s assumptions in the current proposed cap-and-trade regulations. Users can specify a higher or lower offset supply using the available settings.


Like allowances, offsets can also be banked for future use. Thus, we include offsets in our banking calculations. If the user-specified offset supply exceeds what can be used through 2030, given the user-specified emissions projection, then the model calculates this excess and warns the user.


For more on offsets, see Near Zero’s Mar. 2018 report, Interpreting AB 398’s Carbon Offset Limits. For more information on offset credits’ role in banking, see Near Zero's Sep. 2018 report, Tracking Banking in the Western Climate Initiative Cap-and-Trade Program.

def multiindex_change(df, mapping_dict):
    """
    Housekeeping function: updates one or more index levels of a MultiIndexed
    DataFrame, even when there are repeated values in the index.

    Reason for this:
    Pandas .index.set_levels is limited in how it works, and when there are
    repeated values in the index level, it runs, but with spurious results.

    Note: This function does not work on Series, because Pandas doesn't
    include Series.set_index.

    mapping_dict is a dictionary with each key = level_name and each value =
    the constant value to assign to every entry of that level.
    """
    if prmt.verbose_log == True:
        try:
            logging.info(f"{inspect.currentframe().f_code.co_name}")
        except:
            logging.info(f"initialization: {inspect.currentframe().f_code.co_name}")

    # get index names before changing anything, so the original level order
    # can be restored at the end
    df_index_names = df.index.names

    # create empty list (initialization) in which all changed data will be put
    df_level_changed_all = []

    for level_name in mapping_dict.keys():
        # map every entry of this level to the constant replacement value,
        # then drop the old level; the new level is re-attached below
        df_level_changed = df.index.get_level_values(level_name).map(lambda i: mapping_dict[level_name])
        df.index = df.index.droplevel(level_name)

        df_level_changed_all += [df_level_changed]

    # after making changes to all levels in dict, re-attach the new levels
    # and restore the original level order
    df = df.set_index(df_level_changed_all, append=True)
    df = df.reorder_levels(df_index_names)

    return(df)


def convert_ser_to_df_MI(ser):
    """
    Converts certain Series into MultiIndex df. Works for cap, APCR, advance, VRE.

    (Now apparently used only in initialization for CA and QC auctions.)

    Housekeeping function.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start), for {ser.name}")

    df = pd.DataFrame(ser)

    # BUG FIX: original tested `len(df.columns==1)`, which is the *number of
    # columns* (truthy for any non-empty frame), not a one-column check;
    # the intended test is len(df.columns) == 1
    if len(df.columns) == 1:
        df.columns = ['quant']
    else:
        print("Error" + "! In convert_cap_to_MI, len(df.columns)==1 was False.")

    # infer jurisdiction from the series name prefix
    # NOTE(review): names with any other prefix would leave juris undefined
    # and raise NameError below — confirm all callers use CA/VRE/QC prefixes
    if ser.name.split('_')[0] in ['CA', 'VRE']:
        juris = 'CA'
    elif ser.name.split('_')[0] == 'QC':
        juris = 'QC'

    df.index.name = 'vintage'
    df = df.reset_index()

    # default metadata values are for cap
    df['acct_name'] = 'alloc_hold'
    df['juris'] = juris  # established above
    df['inst_cat'] = 'cap'
    # vintage already assigned above
    df['auct_type'] = 'n/a'
    df['newness'] = 'n/a'
    df['status'] = 'n/a'
    df['date_level'] = prmt.NaT_proxy
    df['unsold_di'] = prmt.NaT_proxy
    df['unsold_dl'] = prmt.NaT_proxy
    df['units'] = 'MMTCO2e'

    # overwrite metadata for other sets of instruments
    if 'APCR' in ser.name:
        df['acct_name'] = 'APCR_acct'
        df['inst_cat'] = 'APCR'
        df['auct_type'] = 'reserve'
    elif 'advance' in ser.name:
        df['acct_name'] = 'auct_hold'
        df['inst_cat'] = ser.name.split('_')[0]  # same as juris
        df['auct_type'] = 'advance'
        df['newness'] = 'new'
        df['status'] = 'not_avail'
    elif 'VRE' in ser.name:
        df['acct_name'] = 'VRE_acct'
        # df['juris'] = 'CA'
        df['inst_cat'] = 'VRE_reserve'
        df['status'] = 'n/a'
    else:
        pass

    df = df.set_index(prmt.standard_MI_names)
    return(df)
def convert_ser_to_df_MI_CA_alloc(ser):
    """
    Convert an annual CA allocation Series into a DataFrame with the standard
    MultiIndex. Works for CA allocations.

    Housekeeping function.
    """
    if prmt.verbose_log == True:
        logging.info(f"{inspect.currentframe().f_code.co_name} (start), for series {ser.name}")

    frame = pd.DataFrame(ser)
    frame.index.name = 'vintage'
    frame = frame.reset_index()

    not_consign_list = ['elec_POU_not_consign', 'nat_gas_not_consign', 'industrial_etc_alloc']
    consign_list = ['consign_elec_IOU', 'consign_elec_POU', 'consign_nat_gas']

    if ser.name in not_consign_list:
        # non-consigned allocations sit in the annual allocation holding account
        frame['acct_name'] = 'ann_alloc_hold'
        frame['auct_type'] = 'n/a'
        frame['juris'] = 'CA'
        frame = frame.rename(columns={'alloc': 'quant'})

    elif ser.name in consign_list:
        # consigned allocations go into limited_use, sold at current auctions
        frame['acct_name'] = 'limited_use'
        frame['auct_type'] = 'current'
        frame['juris'] = 'CA'
        frame = frame.rename(columns={'alloc': 'quant'})
        # TO DO: ¿also change inst_cat?
        # don't change newness to new, nor status to not_avail, until consign are in auct_hold

    else:
        print("Error" + "!: Series name is not in either list above.")

    # metadata shared by every CA allocation series
    # (acct_name, juris, auct_type set above; vintage set above)
    frame['date_level'] = prmt.NaT_proxy
    frame['inst_cat'] = ser.name
    frame['newness'] = 'n/a'
    frame['status'] = 'n/a'
    frame['unsold_di'] = prmt.NaT_proxy
    frame['unsold_dl'] = prmt.NaT_proxy
    frame['units'] = 'MMTCO2e'

    # rename column with quantities of allowances from ser.name to 'quant'
    frame = frame.rename(columns={ser.name: 'quant'})
    frame_MI = frame.set_index(prmt.standard_MI_names)

    if prmt.verbose_log == True:
        logging.info(f"{inspect.currentframe().f_code.co_name} (end), for series {ser.name}")

    return(frame_MI)


def convert_ser_to_df_MI_QC_alloc(ser):
    """
    Convert a QC allocation Series into a DataFrame with the standard MultiIndex.

    Will put the QC_alloc into gen_acct.

    Housekeeping function.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    frame = pd.DataFrame(ser)
    frame.index.name = 'vintage'
    frame = frame.reset_index()

    if 'QC_alloc' in ser.name:
        # QC allocations are recorded directly in the general account,
        # tagged with the quarter in which they were distributed (cq.date)
        metadata = {
            'acct_name': 'gen_acct',
            'auct_type': 'n/a',
            'juris': 'QC',
            'inst_cat': f'QC_alloc_{cq.date.year}',
            'date_level': cq.date,
            'newness': 'n/a',
            'status': 'n/a',
            'unsold_di': prmt.NaT_proxy,
            'unsold_dl': prmt.NaT_proxy,
            'units': 'MMTCO2e',
        }
        for column_name, value in metadata.items():
            frame[column_name] = value

        frame = frame.rename(columns={'QC_alloc': 'quant'})

    else:
        print("Error" + "!: Series name is not in list above. Metadata not added.")

    frame = frame.set_index(prmt.standard_MI_names)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return(frame)


# NEW FOR ONLINE (updated version)

def quarter_period(year_quart):
    """
    Convert year_quart (e.g. the string '2013Q4') into a quarterly pd.Period.

    A value that is already a pd.Period is returned unchanged.
    """
    if isinstance(year_quart, pd.Period) == True:
        return(year_quart)

    return(pd.to_datetime(year_quart).to_period('Q'))


def get_qauct_hist():
    """
    Read historical auction data from input file sheet 'quarterly auct hist'.

    Covers all auctions through 2018Q3, for CA, QC, ON.

    No return; sets object attribute prmt.qauct_hist.
    """
    logging.info(f"initialize: {inspect.currentframe().f_code.co_name} (start)")

    # qauct_hist is a full record of auction data, compiled from csvs using another notebook
    hist = pd.read_excel(prmt.input_file, sheet_name='quarterly auct hist')

    # standardize the date column name, then store dates as quarterly periods
    hist = hist.rename(columns={'auction date': 'date_level'})
    hist['date_level'] = pd.to_datetime(hist['date_level']).dt.to_period('Q')

    prmt.qauct_hist = hist
def get_auction_sales_pcts_all():
    """
    Combine historical and projection, and clean up to remove overlap.

    Any projection quarters that overlap the historical record are discarded
    in favor of the historical values.

    No return; sets object attribute prmt.auction_sales_pcts_all.
    """
    logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)")

    # call functions to get historical and projection data
    auction_sales_pcts_historical = get_auction_sales_pcts_historical()
    auction_sales_pcts_projection = get_auction_sales_pcts_projection_from_user_settings()

    # get date of last quarter of historical data, for eliminating overlap
    auction_sales_last_historical_q = auction_sales_pcts_historical.index.get_level_values('date_level').max()

    # remove overlapping quarters from auction_sales_pcts_projection
    df = auction_sales_pcts_projection.copy()
    df = df.loc[df.index.get_level_values('date_level') > auction_sales_last_historical_q]

    # append remaining projection to historical
    df = auction_sales_pcts_historical.append(df)
    df = df.astype(float)

    prmt.auction_sales_pcts_all = df

    # no return; func sets object attribute


def get_auction_sales_pcts_historical():
    """
    Calculates historical auction sales percentages, drawing from the
    historical record (prmt.qauct_hist).

    Returns a Series of fraction sold (Sold / Available), indexed by
    (market, auct_type, date_level).
    """
    logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)")

    # create record of auction sales percentages (from qauct_hist)
    # IOU/POU rows are consignment detail; exclude them to avoid double counting
    df = prmt.qauct_hist.copy()
    df = df[~df['inst_cat'].isin(['IOU', 'POU'])]
    df = df.groupby(['market', 'auct_type', 'date_level'])[['Available', 'Sold']].sum()
    df['sold_pct'] = df['Sold'] / df['Available']

    auction_sales_pcts_historical = df['sold_pct']

    return(auction_sales_pcts_historical)


def get_auction_sales_pcts_projection_from_user_settings():
    """
    Read values for auction sales percentages in projection, as specified by
    the user interface (prmt.years_not_sold_out & prmt.fract_not_sold).

    The user-specified unsold fraction applies to both current and advance
    auctions, in every quarter of each specified year; all other years are
    projected to sell 100%.

    Returns a Series of fraction sold, indexed by (market, auct_type, date_level).

    Raises ValueError if prmt.online_settings_auction is not True.
    """
    logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)")

    years_not_sold_out = prmt.years_not_sold_out
    fract_not_sold = prmt.fract_not_sold

    if prmt.online_settings_auction != True:
        # BUG FIX: the original fell through an `else: pass` and then returned
        # an undefined variable, crashing with NameError; fail clearly instead
        raise ValueError(
            "get_auction_sales_pcts_projection_from_user_settings requires prmt.online_settings_auction == True")

    proj = []
    market = 'CA-QC'

    # fill in projection using user settings for years_not_sold_out & fract_not_sold;
    # any quarters that overlap with historical data will be discarded when
    # hist & proj are combined
    for year in range(2018, 2030+1):
        for quarter in [1, 2, 3, 4]:
            date_level = quarter_period(f"{year}Q{quarter}")

            # the same sale fraction applies to current and advance auctions
            # (deduplicates the original copy-pasted per-auction-type loops)
            for auct_type in ['current', 'advance']:
                if year in years_not_sold_out:
                    proj += [(market, auct_type, date_level, (1 - fract_not_sold))]
                else:
                    # fract_not_sold is 0% (sold is 100%) for all years not in years_not_sold_out
                    proj += [(market, auct_type, date_level, 1.0)]

    proj_df = pd.DataFrame(proj, columns=['market', 'auct_type', 'date_level', 'value'])
    ser = proj_df.set_index(['market', 'auct_type', 'date_level'])['value']
    ser = ser.sort_index()
    auction_sales_pcts_projection = ser

    return(auction_sales_pcts_projection)


def initialize_CA_cap():
    """
    CA cap quantities from § 95841. Annual Allowance Budgets for Calendar Years 2013-2050:
    * Table 6-1: 2013-2020 California GHG Allowance Budgets
    * Table 6-2: 2021-2031 California GHG Allowance Budgets
    * 2032-2050: equation for post-2031 cap

    Returns a Series of annual cap quantities indexed by year, truncated at 2030.
    """
    CA_cap_data = prmt.CA_cap_data

    CA_cap = CA_cap_data[CA_cap_data['name']=='CA_cap']
    CA_cap = CA_cap.set_index('year')['data']
    # model horizon ends at 2030; discard any later years in the input data
    CA_cap = CA_cap.loc[:2030]
    CA_cap.name = 'CA_cap'

    logging.info('initialize: CA_cap')

    return(CA_cap)
def initialize_CA_APCR():
    """
    Initialize the CA Allowance Price Containment Reserve (APCR) quantities.

    In current regs (Oct 2017), quantities for APCR for budget years 2013-2020 defined as percentage of budget.
    2013-2020 specified in regs § 95870(a):
    * (1) One percent of the allowances from budget years 2013-2014;
    * (2) Four percent of the allowances from budget years 2015-2017; and
    * (3) Seven percent of the allowances from budget years 2018-2020.

    In current regs (Oct 2017), quantities for APCR for budget years 2021-2030 defined as total quantities.
    (See § 95871(a) and Table 8-2 (as of Oct 2017))

    In proposed new regs (Sep 2018), quantities for APCR for budget years 2021-2030 defined as total quantities.
    (See § 95871(a) and Table 8-2 (as of Sep 2018), which is updated from Oct 2017 version of regs)

    Returns a MultiIndex DataFrame of APCR quantities (MMTCO2e).
    """
    logging.info('initialize_CA_APCR')

    CA_cap = prmt.CA_cap
    CA_cap_data = prmt.CA_cap_data
    # FIX: this local was assigned but never used; the condition below read
    # prmt.CA_post_2020_regs directly — now the local is used consistently
    CA_post_2020_regs = prmt.CA_post_2020_regs

    # for 2013-2020: get cap & reserve fraction from input file
    # calculate APCR amounts
    CA_APCR_fraction = CA_cap_data[CA_cap_data['name']=='CA_APCR_fraction']
    CA_APCR_fraction = CA_APCR_fraction.set_index('year')['data']
    CA_APCR_2013_2020 = CA_cap * CA_APCR_fraction
    CA_APCR_2013_2020 = CA_APCR_2013_2020.loc[2013:2020]

    # for 2021-2031: get APCR amounts from input file
    CA_APCR_2021_2031 = CA_cap_data[CA_cap_data['name']=='CA_APCR']
    CA_APCR_2021_2031 = CA_APCR_2021_2031.set_index('year')['data']

    # only keep through 2030
    CA_APCR_2021_2030 = CA_APCR_2021_2031.loc[2021:2030]

    CA_APCR = CA_APCR_2013_2020.append(CA_APCR_2021_2030)
    CA_APCR.name = 'CA_APCR'

    # new regulations for CA:
    if CA_post_2020_regs in ['Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018']:
        # move additional 2% of cap for 2026-2030 to APCR;
        # do this by removing equal amount from each annual budget 2021-2030
        # as stated in "Price Concepts" paper, this is 2.272600 MMTCO2e per year
        # and as stated in the "Post-2020 Caps" paper, it would be a total of ~22.7M allowances
        CA_APCR_extra_sum = CA_cap.loc[2026:2030].sum() * 0.02
        CA_APCR_extra_ann = CA_APCR_extra_sum / len(range(2021, 2030+1))
        CA_APCR_new_2021_2030 = CA_APCR.loc[2021:2030] + CA_APCR_extra_ann
        CA_APCR = CA_APCR.loc[2013:2020].append(CA_APCR_new_2021_2030)
    # if other proposals for new regulations, put them here
    else:
        pass

    CA_APCR_MI = convert_ser_to_df_MI(CA_APCR)

    logging.info('initialize: CA_APCR')

    return(CA_APCR_MI)


def initialize_CA_advance():
    """
    Fraction of CA cap that is set aside for advance is defined in regulations.

    For 2013-2020: § 95870(b)
    For 2021-2030: § 95871(b)

    Returns a MultiIndex DataFrame of advance auction quantities.
    """
    logging.info('initialize: CA_advance')

    CA_cap = prmt.CA_cap
    CA_cap_data = prmt.CA_cap_data

    CA_advance_fraction = CA_cap_data[CA_cap_data['name']=='CA_advance_fraction']
    CA_advance_fraction = CA_advance_fraction.set_index('year')['data']

    # years with no advance fraction in the input file get zero
    CA_advance = (CA_cap * CA_advance_fraction).fillna(0)
    CA_advance.name = 'CA_advance'

    CA_advance_MI = convert_ser_to_df_MI(CA_advance)

    return(CA_advance_MI)


def initialize_VRE_reserve():
    """
    Initialize the CA Voluntary Renewable Electricity (VRE) reserve account,
    computed as a fraction of the CA cap (CA_Voluntary_Renewable_fraction
    from the input file).

    Quantities for 2021-2030 are zeroed out.
    NOTE(review): presumably because the VRE set-aside is not continued
    post-2020 — confirm against the regulations.

    Returns a MultiIndex DataFrame of VRE reserve quantities.

    (Original docstring was the placeholder "DOCSTRINGS".)
    """
    logging.info('initialize_VRE_reserve')

    CA_cap = prmt.CA_cap
    CA_cap_data = prmt.CA_cap_data

    VRE_fraction = CA_cap_data[CA_cap_data['name']=='CA_Voluntary_Renewable_fraction']
    VRE_fraction = VRE_fraction.set_index('year')['data']

    VRE_reserve = CA_cap * VRE_fraction

    # zero out post-2020 years
    for year in range(2021, 2030+1):
        VRE_reserve.at[year] = float(0)

    VRE_reserve.name = 'VRE_reserve'

    VRE_reserve_MI = convert_ser_to_df_MI(VRE_reserve)

    return(VRE_reserve_MI)


def read_CA_alloc_data():
    """
    Reads historical allocation data, as well as cap adjustment factors.

    CA allocations use cap adjustment factor from § 95891, Table 9-2

    note: Input file only includes the standard cap adjustment factors.

    If need be, can add to input file the non-standard for particular process
    intensive industries.

    Returns a DataFrame read from input file sheet 'CA alloc data'.
    """
    logging.info('read_CA_alloc_data')

    CA_alloc_data = pd.read_excel(prmt.input_file, sheet_name='CA alloc data')

    return(CA_alloc_data)
def initialize_elec_alloc():
    """
    2021-2030 Electrical Distribution Utility Sector Allocation (IOU & POU):

    2021-2030: § 95871(c)(1)
    details determined by 95892(a), with allocation quantities explicitly stated in § 95892 Table 9-4
    (data copied from pdf (opened in Adobe Reader) into Excel; saved in input file)
    but utilities not identified in Table 9-4 as IOU or POU
    so merge with 2013-2020 df, and then compute sums for whole time span 2013-2030
    (also note this does not go through 2031, as cap does)

    Returns a pair of Series (elec_alloc_IOU, elec_alloc_POU), indexed by
    year, in units of MMTCO2e (million allowances).
    """

    logging.info('initialize_elec_alloc')

    # create elec_alloc_2013_2020

    # read input file;
    # has '-' for zero values in some cells; make those NaN, replace NaN with zero; then clean up strings
    df = pd.read_excel(prmt.input_file, sheet_name='CA elec alloc 2013-2020', na_values='-')
    df = df.fillna(0)
    # strip non-breaking spaces (\xa0) left over from the pdf-to-Excel copy
    df = df.replace('\xa0', '', regex=True)

    # convert all data to int, which rounds down any fractional allowances
    for column in range(2013, 2020+1):
        df[column] = df[column].astype(int)

    df = df.rename(columns={'Utility Name': 'Utility Name (2013-2020)'})

    # in original file, total was in a row at the end, with no label
    # there was also an empty row between the total row and the rows with data by utility
    # both the total row and empty row have '0' as utility name, because of fillna(0) above
    # so only keep rows with 'Utility Name (2013-2020)' not 0
    df = df[df['Utility Name (2013-2020)']!=0]

    elec_alloc_2013_2020 = df

    # ~~~~~~~~~~~~~~~~~~~~

    # create elec_alloc_2021_2030
    df = pd.read_excel(prmt.input_file, sheet_name='CA elec alloc 2021-2030')

    # clean up data, including in column headers
    # strip out line breaks (\xa0) & spaces & commas
    df = df.replace('\xa0', '', regex=True)
    df.columns = df.columns.str.strip('\xa0')
    df = df.rename(columns={'Utility': 'Utility Name'})
    df = df.set_index('Utility Name')
    df.columns = df.columns.astype(int)
    # values were copied with thousands separators; strip the commas
    df = df.replace(',', '', regex=True)

    # convert all column names to int
    for column in range(2021, 2030+1):
        df[column] = df[column].astype(int)
    df = df.reset_index()

    # rename utilities according to map I created between 2013-2020 and 2021-2030 versions
    CA_util_names_map = pd.read_excel(prmt.input_file, sheet_name='CA util names map')
    CA_util_names_map = CA_util_names_map.replace('\xa0', '', regex=True)
    CA_util_names_map.columns = CA_util_names_map.columns.str.strip('\xa0')

    df = pd.merge(df, CA_util_names_map, left_on='Utility Name', right_on='Utility Name (2021-2030)', how='outer')
    df = df.drop(['Utility Name', 'notes'], axis=1)

    elec_alloc_2021_2030 = df

    # ~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: create elec_alloc_IOU & elec_alloc_POU')

    # create elec_alloc_IOU & elec_alloc_POU (units MMTCO2e)
    # outer merge keeps utilities that appear in only one of the two spans
    df = pd.merge(elec_alloc_2013_2020, elec_alloc_2021_2030,
                  left_on='Utility Name (2013-2020)', right_on='Utility Name (2013-2020)', how='outer')

    # COOPs are treated as POUs for allocation purposes
    df['Utility Type'] = df['Utility Type'].replace('COOP', 'POU')
    df = df.groupby('Utility Type').sum().T
    df.index = df.index.astype(int)
    # convert units from allowances to MILLION allowances (MMTCO2e)
    df = df / 1e6

    elec_alloc_IOU = df['IOU']
    elec_alloc_IOU.name = 'elec_alloc_IOU'
    elec_alloc_POU = df['POU']
    elec_alloc_POU.name = 'elec_alloc_POU'

    # elec_alloc_IOU and elec_alloc_POU are transferred to appropriate accounts later, in consignment section

    return(elec_alloc_IOU, elec_alloc_POU)
def initialize_nat_gas_alloc(CA_alloc_data):
    """
    Initialize the CA natural gas supplier allocation.

    Historical values come from the input file; future years are projected
    from inferred 2011 supplier emissions times the cap adjustment factor.

    Returns a Series indexed by year, in MMTCO2e (million allowances).
    """
    # historical data from annual allocation reports; stored in input file
    # have to use these historical values to calculate 2011 natural gas supplier emissions
    # once 2011 natural gas supplier emissions has been calculated, can use equation in regulations for projections

    CA_cap_adjustment_factor = prmt.CA_cap_adjustment_factor

    logging.info('initialize: nat_gas_alloc')

    nat_gas_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='nat_gas_alloc']
    nat_gas_alloc['year'] = nat_gas_alloc['year'].astype(int)
    nat_gas_alloc = nat_gas_alloc.set_index('year')['data']

    # not clear from MRR which emissions are credited to natural gas suppliers, or which emissions regs are referring to
    # but can infer what emissions in 2011 ARB used for calculating allocations disbursed to date (2015-2017)
    # emissions in 2011 = reported allocations for year X / adjustment factor for year X
    # can calculate emissions in 2011 from this equation for any particular year;
    # to avoid rounding errors, can calculate mean of ratios from each year
    nat_gas_emissions_2011_inferred = (nat_gas_alloc / CA_cap_adjustment_factor).mean()

    # get last historical year of nat_gas_alloc
    nat_gas_alloc_last_year = nat_gas_alloc.index[-1]

    # calculate allocation for all future years
    # NOTE(review): the range starts AT the last historical year, so that
    # year's reported value is overwritten by the computed value — confirm
    # this is intentional rather than an off-by-one
    for future_year in range(nat_gas_alloc_last_year, 2031+1):
        nat_gas_alloc_future = nat_gas_emissions_2011_inferred * CA_cap_adjustment_factor.at[future_year]
        nat_gas_alloc.at[future_year] = nat_gas_alloc_future

    # add data points with zeros to make later steps easier
    nat_gas_alloc.at[2013] = float(0)
    nat_gas_alloc.at[2014] = float(0)

    # convert units from allowances to MILLION allowances (MMTCO2e)
    nat_gas_alloc = nat_gas_alloc / 1e6

    nat_gas_alloc.name = 'nat_gas_alloc'

    return(nat_gas_alloc)


def initialize_industrial_etc_alloc(CA_alloc_data):
    """
    Initialize the combined CA "industrial & other" allocation: industrial,
    water, university, legacy generation, thermal output, waste-to-energy,
    and LNG supplier allocations, summed by year.

    Combines historical values from the input file with ARB's projection and
    the retroactive true-ups for 2018-2019 assistance factors.

    Returns a Series named 'industrial_etc_alloc', indexed by year, in
    MMTCO2e (million allowances).
    """
    CA_cap_adjustment_factor = prmt.CA_cap_adjustment_factor

    logging.info('initialize: industrial_alloc')

    industrial_alloc = CA_alloc_data.copy()[CA_alloc_data['name'].isin(['industrial_alloc', 'industrial_and_legacy_gen_alloc'])]
    industrial_alloc['year'] = industrial_alloc['year'].astype(int)
    industrial_alloc = industrial_alloc.set_index('year')['data']

    # convert units from allowances to MILLION allowances (MMTCO2e)
    industrial_alloc = industrial_alloc/1e6

    industrial_alloc.name = 'industrial_alloc'

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: water_alloc')
    water_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='water_alloc']
    water_alloc['year'] = water_alloc['year'].astype(int)
    water_alloc = water_alloc.set_index('year')
    water_alloc = water_alloc['data']
    water_alloc = water_alloc.astype(float)

    # § 95895(b): "2021 and subsequent years"
    # (calculate values 2021-2031, and also combine below with values 2015-2020)

    # for post-2020, method is:
    # allocation = 47,853 × cap_adjustment_factor (by year)

    # get base level (value 47,853 allowances; stored in input file)
    water_alloc_post_2020_base_level = CA_alloc_data.set_index('name').at['water_alloc_post_2020_base_level', 'data']

    for year in range(2021, 2030+1):
        water_alloc_year = water_alloc_post_2020_base_level * CA_cap_adjustment_factor[year]
        water_alloc.at[year] = water_alloc_year

    # convert units from allowances to MILLION allowances (MMTCO2e)
    water_alloc = water_alloc / 1e6

    water_alloc.name = 'water_alloc'

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: university_alloc')
    university_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='university_alloc']
    university_alloc['year'] = university_alloc['year'].astype(int)
    university_alloc = university_alloc.set_index('year')['data']

    university_alloc.name = 'university_alloc'

    # convert units from allowances to MILLION allowances (MMTCO2e)
    university_alloc = university_alloc / 1e6

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: legacy_gen_alloc')
    legacy_gen_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='legacy_gen_alloc']
    legacy_gen_alloc['year'] = legacy_gen_alloc['year'].astype(int)
    legacy_gen_alloc = legacy_gen_alloc.set_index('year')['data']

    # convert units from allowances to MILLION allowances (MMTCO2e)
    legacy_gen_alloc = legacy_gen_alloc / 1e6

    legacy_gen_alloc.name = 'legacy_gen_alloc'

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: thermal_output_alloc')

    # variable allocation
    thermal_output_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='thermal_output_alloc']
    thermal_output_alloc['year'] = thermal_output_alloc['year'].astype(int)
    thermal_output_alloc = thermal_output_alloc.set_index('year')['data']

    # convert units from allowances to MILLION allowances (MMTCO2e)
    thermal_output_alloc = thermal_output_alloc / 1e6

    thermal_output_alloc.name = 'thermal_output_alloc'

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: waste_to_energy_alloc')
    waste_to_energy_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='waste_to_energy_alloc']
    waste_to_energy_alloc['year'] = waste_to_energy_alloc['year'].astype(int)
    waste_to_energy_alloc = waste_to_energy_alloc.set_index('year')['data']

    # convert units from allowances to MILLION allowances (MMTCO2e)
    waste_to_energy_alloc = waste_to_energy_alloc / 1e6

    waste_to_energy_alloc.name = 'waste_to_energy_alloc'

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    logging.info('initialize: LNG_supplier_alloc')
    # variable allocation
    LNG_supplier_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='LNG_supplier_alloc']
    LNG_supplier_alloc['year'] = LNG_supplier_alloc['year'].astype(int)
    LNG_supplier_alloc = LNG_supplier_alloc.set_index('year')['data']

    # convert units from allowances to MILLION allowances (MMTCO2e)
    LNG_supplier_alloc = LNG_supplier_alloc / 1e6

    LNG_supplier_alloc.name = 'LNG_supplier_alloc'

    industrial_etc_alloc_list = [industrial_alloc, water_alloc, university_alloc, legacy_gen_alloc,
                                 thermal_output_alloc, waste_to_energy_alloc, LNG_supplier_alloc]

    # sum all components by year
    industrial_etc_alloc = pd.concat(industrial_etc_alloc_list, axis=1).sum(axis=1)
    industrial_etc_alloc.name = 'industrial_etc_alloc_hist'

    # calculate what allocation would be in case for all assistance factors at 100% for 2018-2020
    # assume that the resulting allocation for each year would be:

    idealized = pd.Series()
    for year in range(2018, 2030+1):
        cap_adj_ratio_year = CA_cap_adjustment_factor.at[year] / CA_cap_adjustment_factor.at[2017]
        idealized.at[year] = industrial_etc_alloc.at[2017] * cap_adj_ratio_year

    # compare against ARB's projection from 2018-03-02 workshop presentation, slide 9
    # (as extracted using WebPlotDigitizer)
    ARB_proj = pd.read_excel(prmt.input_file, sheet_name='ARB allocs to 2030')
    ARB_proj = ARB_proj[['year', 'industrial and other allocation (estimate) [WPD]']].set_index('year')
    ARB_proj = ARB_proj[ARB_proj.columns[0]]
    ARB_proj.name = 'industrial_etc_alloc_ARB_proj'

    # ARB's graph shows industrial & other allocations somewhat higher (~ +0.5 M / year) over historical period
    # and somewhat lower (~ -0.5 M/year) than my projection based on 2017

    # there is uncertainty about what this projection will be, since it depends on activity
    # the two are within ~1.5%, which is close enough

    # true-ups to make up for lower assistance factors in 2018-2019 will be applied retroactively, in 2020 & 2021
    CA_trueups_retro = (idealized - ARB_proj).loc[2018:2019]
    # shift the true-up years forward two years (2018->2020, 2019->2021)
    CA_trueups_retro.index = CA_trueups_retro.index + 2
    CA_trueups_retro.name = 'CA_trueups_retro'

    # allocation for 2020 will use 100% assistance factor
    CA_additional_2020 = (idealized - ARB_proj).loc[2020:2020]
    CA_additional_2020.name = 'CA_additional_2020'

    # identify last historical year of data
    last_hist_year = industrial_alloc.index[-1]

    # combine the 4 pieces: historical, projection with lower assistance factors, trueups_retro, and additional
    industrial_etc_alloc = pd.concat([industrial_etc_alloc.loc[:last_hist_year],
                                      ARB_proj.loc[last_hist_year+1:],
                                      CA_trueups_retro,
                                      CA_additional_2020],
                                     axis=1).sum(axis=1)
    industrial_etc_alloc.name = 'industrial_etc_alloc'

    return(industrial_etc_alloc)
allocation\n", + " # established by § 95892(b)(2)\n", + " \n", + " # natural gas allocation, minimum consignment portion:\n", + " # set by § 95893(b)(1)(A), Table 9-5, and Table 9-6\n", + " # values from tables above are in the input file\n", + " CA_consign_regs = pd.read_excel(prmt.input_file, sheet_name='CA consign regs')\n", + "\n", + " consign_nat_gas_min_fraction = CA_consign_regs[CA_consign_regs['name']=='CA_natural_gas_min_consign_fraction']\n", + " consign_nat_gas_min_fraction = consign_nat_gas_min_fraction.set_index('year')['data']\n", + "\n", + " consign_nat_gas_min = nat_gas_alloc * consign_nat_gas_min_fraction\n", + " consign_nat_gas_min.name = 'consign_nat_gas_min'\n", + " \n", + " # analysis of natural gas consignment:\n", + " # if we assume that natural gas allocation is proportional to MRR covered emissions for natural gas distribution...\n", + " # ... for each entity, for those entities that did receive an allocation...\n", + " # ... then we can conclude that IOUs are consigning zero or negligible (~0.1 MMTCO2e) optional amounts...\n", + " # ... 
above the minimum natural gas consignment\n", + " # then actual nat gas consignment = minimum nat gas consignment\n", + " consign_nat_gas = consign_nat_gas_min.copy()\n", + " consign_nat_gas.name = 'consign_nat_gas'\n", + " \n", + " nat_gas_not_consign = pd.concat([nat_gas_alloc, -1*consign_nat_gas], axis=1).sum(axis=1).loc[2013:2030]\n", + " nat_gas_not_consign.name = 'nat_gas_not_consign'\n", + " \n", + " # infer optional consignment amount (elec & nat gas)\n", + " consign_opt = (consign_ann - consign_elec_IOU - consign_nat_gas.fillna(0)).loc[2013:2030]\n", + " \n", + " # if IOUs are not consigning any optional amounts from their nat gas allocation, \n", + " # and we know IOUs must consign 100% of their electricity allocation,\n", + " # then we can conclude that all of the consign optional is from POUs\n", + " # and\n", + " # if we assume that POUs are like IOUs in consigning only the minimum required from natural gas allocation,\n", + " # then the optional POU consignment (in excess of the minimum for nat gas) would be from POUs' electricity allocation\n", + " # (remember, POUs don't have to consign any of their electricity allocation)\n", + " consign_elec_POU = consign_opt.copy()\n", + " consign_elec_POU.name = 'consign_elec_POU'\n", + " \n", + " # calculate the mean fraction of the electricity POU allocation that was consigned \n", + " # (any electricity POU consignment is optional; none is required)\n", + " consign_elec_POU_fraction = (consign_opt/elec_alloc_POU).mean()\n", + "\n", + " for year in range(2019, 2030+1):\n", + " consign_elec_POU_year = elec_alloc_POU.at[year] * consign_elec_POU_fraction \n", + " consign_elec_POU.at[year] = consign_elec_POU_year\n", + " \n", + " elec_POU_not_consign = pd.concat([elec_alloc_POU, -1*consign_elec_POU], axis=1).sum(axis=1).loc[2013:2030]\n", + " elec_POU_not_consign.name = 'elec_POU_not_consign'\n", + " \n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # if we want to distinguish nat gas consign IOU vs POU, \n", + " 
# could assume that nat gas allocations are proportional to nat gas distribution covered emissions from each entity\n", + " # note that not all IOUs with natural gas distribution covered emissions (according to MRR) received allocations\n", + " # but all POUs with natural gas distribution covered emissions (according to MRR) did receive allocations\n", + "\n", + " # note that we only have emissions data for 2015-2016\n", + " # so to split nat gas allocations for 2017-2018 between IOU and POU, \n", + " # would need to assume each entity receiving an allocation had the same percentage of emissions as in historical data\n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " \n", + " # consign_ann: calculate new values for projection years\n", + " for year in range(2019, 2030+1):\n", + " consign_ann.at[year] = consign_elec_IOU.at[year] + consign_elec_POU.at[year] + consign_nat_gas.at[year]\n", + " \n", + " # TO DO: create named tuple for all consigned & not consigned dfs; output this named tuple\n", + " \n", + " return(consign_ann, consign_elec_IOU, consign_nat_gas, consign_elec_POU, nat_gas_not_consign, elec_POU_not_consign)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def consign_upsample_historical_and_projection(consign_ann):\n", + " # QUARTERLY VALUES: GET HISTORICAL & CALCULATE PROJECTIONS\n", + " \n", + " qauct_new_avail = prmt.qauct_new_avail\n", + " \n", + " consign_q_hist = qauct_new_avail.loc[qauct_new_avail.index.get_level_values('inst_cat')=='consign']\n", + " \n", + " last_cur_hist_q = consign_q_hist.index.get_level_values('date_level').max()\n", + "\n", + " # create template row for adding additional rows to consign_hist_proj\n", + " consign_1q_template = consign_q_hist.loc[consign_q_hist.index[-1:]]\n", + " consign_1q_template.at[consign_1q_template.index, 'quant'] = float(0)\n", + "\n", + " if last_cur_hist_q.quarter < 4:\n", + " # fill in missing quarters for last historical year\n", + 
" \n", + " # get annual total consigned\n", + " consign_ann_1y = consign_ann.at[last_cur_hist_q.year]\n", + " \n", + " # calculate total already newly available that year\n", + " df = consign_q_hist.loc[consign_q_hist.index.get_level_values('date_level').year==last_cur_hist_q.year]\n", + " consign_1y_to_date = df['quant'].sum()\n", + " \n", + " # calculate remaining to consign\n", + " consign_remaining = consign_ann_1y - consign_1y_to_date\n", + "\n", + " # number of remaining auctions:\n", + " num_remaining_auct = 4 - last_cur_hist_q.quarter\n", + " \n", + " # average consignment in remaining auctions\n", + " avg_consign = consign_remaining / num_remaining_auct\n", + " \n", + " consign_hist_proj = consign_q_hist.copy()\n", + " \n", + " for proj_q in range(last_cur_hist_q.quarter+1, 4+1):\n", + " proj_date = quarter_period(f\"{last_cur_hist_q.year}Q{proj_q}\")\n", + " \n", + " # create new row; update date_level and quantity\n", + " consign_new_row = consign_1q_template.copy()\n", + " mapping_dict = {'date_level': proj_date}\n", + " consign_new_row = multiindex_change(consign_new_row, mapping_dict)\n", + " consign_new_row.at[consign_new_row.index, 'quant'] = avg_consign\n", + " \n", + " # set new value in consign_hist_proj\n", + " consign_hist_proj = consign_hist_proj.append(consign_new_row)\n", + " \n", + " # for years after last historical data year (last_cur_hist_q.year) \n", + " for year in range(last_cur_hist_q.year+1, 2030+1):\n", + " avg_consign = consign_ann.loc[year] / 4\n", + " \n", + " for quarter in [1, 2, 3, 4]:\n", + " proj_date = quarter_period(f\"{year}Q{quarter}\")\n", + "\n", + " # create new row; update date_level and quantity\n", + " consign_new_row = consign_1q_template.copy()\n", + " mapping_dict = {'date_level': proj_date, \n", + " 'vintage': year}\n", + " consign_new_row = multiindex_change(consign_new_row, mapping_dict)\n", + " \n", + " consign_new_row.at[consign_new_row.index, 'quant'] = avg_consign\n", + " \n", + " # set new value in 
consign_hist_proj\n", + " consign_hist_proj = consign_hist_proj.append(consign_new_row)\n", + " \n", + " prmt.consign_hist_proj = consign_hist_proj\n", + " \n", + " # no return; func sets object attribute prmt.consign_hist_proj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_QC_allocation_data():\n", + " \"\"\"\n", + " From input file, import full data set on QC allocations.\n", + " \n", + " Separated by emissions year, date of allocation, and type of allocation (initial, true-up #1, etc.).\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialize: {inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " # get more detailed allocation data (for hindcast)\n", + " QC_alloc_hist = pd.read_excel(prmt.input_file, sheet_name='QC alloc data full')\n", + " QC_alloc_hist['allocation quarter'] = pd.to_datetime(QC_alloc_hist['allocation date']).dt.to_period('Q')\n", + " \n", + " # convert units to MMTCO2e\n", + " QC_alloc_hist['quant'] = QC_alloc_hist['quantity to date (tons CO2e)']/1e6\n", + " \n", + " QC_alloc_hist = QC_alloc_hist.drop(['before or after quarterly auction', \n", + " 'allocation date',\n", + " 'quantity to date (tons CO2e)',\n", + " 'quantity on date for true-ups (tons CO2e)', \n", + " 'notes'], \n", + " axis=1)\n", + " \n", + " QC_alloc_hist = QC_alloc_hist.set_index(['allocation for emissions year',\n", + " 'allocation type',\n", + " 'allocation quarter'])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # isolate initial allocations\n", + " QC_alloc_initial = QC_alloc_hist.loc[QC_alloc_hist.index.get_level_values('allocation type')=='initial']\n", + " QC_alloc_initial.index = QC_alloc_initial.index.droplevel('allocation type')\n", + " \n", + " # make projection for initial allocations\n", + " # take most recent historical data, assume future initial allocations will scale down with cap\n", + " last_year = QC_alloc_initial.index.get_level_values('allocation for 
emissions year').max()\n", + "\n", + " df = QC_alloc_initial.loc[QC_alloc_initial.index.get_level_values('allocation quarter').year==last_year]\n", + " QC_alloc_initial_last_year = df['quant'].sum()\n", + " \n", + " # initialize and clear values\n", + " QC_alloc_initial_proj = QC_alloc_initial.copy() # initialize\n", + " QC_alloc_initial_proj['quant'] = float(0)\n", + " QC_alloc_initial_proj = QC_alloc_initial_proj.loc[QC_alloc_initial_proj['quant']>0]\n", + " \n", + " # use cap_adjustment_factor\n", + " # assume the initial allocations will always be in Q1\n", + " for year in range(last_year+1, 2030+1):\n", + " cap_adjustment_ratio = prmt.QC_cap.at[year]/prmt.QC_cap.at[last_year]\n", + " QC_alloc_initial_proj_quant = QC_alloc_initial_last_year * cap_adjustment_ratio\n", + " QC_alloc_initial_proj.at[(year, quarter_period(f'{year}Q1')), 'quant'] = QC_alloc_initial_proj_quant\n", + " \n", + " QC_alloc_initial = QC_alloc_initial.append(QC_alloc_initial_proj)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # calculate true-ups for each distribution from cumulative data reported\n", + " # (use diff to calculate difference between a given data point and previous one)\n", + " QC_alloc_trueups = QC_alloc_hist.groupby('allocation for emissions year').diff().dropna()\n", + " QC_alloc_trueups.index = QC_alloc_trueups.index.droplevel('allocation type')\n", + " \n", + " # make projection for true-up allocations, following after latest year with a first true-up (in Q3)\n", + " Q3_trueup_mask = QC_alloc_trueups.index.get_level_values('allocation quarter').quarter==3\n", + " Q3_trueups = QC_alloc_trueups.copy().loc[Q3_trueup_mask]\n", + " not_Q3_trueups = QC_alloc_trueups.loc[~Q3_trueup_mask]\n", + " \n", + " last_year = Q3_trueups.index.get_level_values('allocation for emissions year').max()\n", + " \n", + " # first true-ups are 25% of total estimated allocation, whereas initial alloc are 75% of total est. 
alloc\n", + " # therefore first true-ups are one-third (25%/75%) of the initial true-up\n", + " # in projection, do not model any further true-ups after first true-ups (assume no revisions of allocation)\n", + " for year in range(last_year+1, 2030+1):\n", + " init_last_year_plus1 = QC_alloc_initial.at[(year, f'{year}Q1'), 'quant']\n", + " first_trueup_quant = init_last_year_plus1 / 3\n", + " Q3_trueups.at[(year, quarter_period(f'{year+1}Q3')), 'quant'] = first_trueup_quant\n", + " \n", + " Q3_trueups = Q3_trueups.dropna()\n", + " \n", + " # recombine:\n", + " QC_alloc_trueups = pd.concat([Q3_trueups, not_Q3_trueups])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~\n", + " # also calculate full (est.) allocation for projection years\n", + " # used for setting aside this quantity from cap, for initial alloc and first true-up\n", + " \n", + " # calculate full allocation (100%) from the initial allocation (75% of full quantity)\n", + " QC_alloc_full_proj = QC_alloc_initial_proj * 100/75\n", + " \n", + " # set object attributes\n", + " prmt.QC_alloc_initial = QC_alloc_initial\n", + " prmt.QC_alloc_trueups = QC_alloc_trueups\n", + " prmt.QC_alloc_full_proj = QC_alloc_full_proj\n", + " \n", + " # no return; func sets object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_compliance_events():\n", + " \"\"\"\n", + " From compliance reports, create record of compliance events (quantities surrendered at specific times).\n", + " \n", + " Note that quantities surrendered are *not* the same as the covered emissions that have related obligations.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\") \n", + " \n", + " # get record of retirements (by vintage) from compliance reports\n", + " df = pd.read_excel(prmt.input_file, sheet_name='annual compliance reports')\n", + " df = df.set_index('year of compliance event')\n", + "\n", + " df = 
df.drop('for 2013-2014 full compliance period')\n", + " df = df.drop(['CA checksum', 'QC checksum'], axis=1)\n", + " df = df.drop(['CA entities retired total (all instruments)', \n", + " 'QC entities retired total (all instruments)'], axis=1)\n", + " df = df.dropna(how='all')\n", + "\n", + " # convert compliance report values into compliance events (transfers that occur each Nov)\n", + " # sum allowances by vintage, combining surrenders by CA & QC entities\n", + " df = df.copy()\n", + " df.columns = df.columns.str.replace('CA entities retired ', '')\n", + " df.columns = df.columns.str.replace('QC entities retired ', '')\n", + " df.columns = df.columns.str.replace('allowance vintage ', '')\n", + " df.columns.name = 'vintage or type'\n", + "\n", + " df = df.stack()\n", + " df = pd.DataFrame(df, columns=['quant'])\n", + " df = df.loc[df['quant'] > 0]\n", + " df = df.groupby(['year of compliance event', 'vintage or type']).sum().reset_index()\n", + " df['compliance_date'] = pd.to_datetime(df['year of compliance event'].astype(str)+'-11-01').dt.to_period('Q')\n", + "\n", + " # rename 'Early Reduction credits'\n", + " df['vintage or type'] = df['vintage or type'].str.replace('Early Reduction credits', 'early_action')\n", + "\n", + " df = df[['compliance_date', 'vintage or type', 'quant']].set_index(['compliance_date', 'vintage or type'])\n", + "\n", + " prmt.compliance_events = df\n", + " \n", + " # no return; func sets object attribute" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# TEST FUNCTIONS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_cols_and_indexes_before_transfer(to_acct_MI):\n", + " \"\"\"\n", + " {{{INSERT DOCSTRINGS}}}\n", + " \"\"\"\n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # check that to_acct_MI has only 1 
column & that it has MultiIndex\n", + " if len(to_acct_MI.columns)==1 and isinstance(to_acct_MI.index, pd.MultiIndex):\n", + " pass # test passed\n", + " \n", + " elif len(to_acct_MI.columns)>1:\n", + " print(f\"{prmt.test_failed_msg} df to_acct_MI has more than 1 column. Here's to_acct_MI:\")\n", + " print(to_acct_MI)\n", + " \n", + " elif len(to_acct_MI.columns)==0:\n", + " print(f\"{prmt.test_failed_msg} df to_acct_MI has no columns. Here's to_acct_MI:\")\n", + " print(to_acct_MI)\n", + " \n", + " else: # closing \"if len(to_acct_MI.columns)==1...\"\n", + " print(f\"{prmt.test_failed_msg} Something else going on with df to_acct_MI columns and/or index. Here's to_acct_MI:\")\n", + " print(to_acct_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_for_duplicated_indices(df, parent_fn):\n", + " \"\"\"\n", + " Test to check a dataframe (df) for duplicated indices, and if any, to show them (isolated and in context).\n", + " \"\"\"\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + "\n", + " dups = df.loc[df.index.duplicated(keep=False)]\n", + " \n", + " if dups.empty == False:\n", + " print(f\"{prmt.test_failed_msg} df.index.duplicated when running {parent_fn}; here are duplicated indices:\")\n", + " print(dups)\n", + " \n", + " # get the acct_names that show up in duplicates\n", + " dup_acct_names = dups.index.get_level_values('acct_name').unique().tolist()\n", + " \n", + "# for dup_acct_name in dup_acct_names:\n", + "# print(f\"During {parent_fn}, dups in acct_name {dup_acct_name}; here's the full account:\")\n", + "# print(df.loc[df.index.get_level_values('acct_name')==dup_acct_name])\n", + " elif df[df.index.duplicated(keep=False)].empty == True:\n", + " # test passed: there were no duplicated indices\n", + " pass\n", + " else:\n", + " print(f\"{prmt.test_failed_msg} During {parent_fn}, was meant to 
be check for duplicated indices.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_if_value_is_float_or_np_float64(test_input):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " if isinstance(test_input, float)==False and isinstance(test_input, np.float64)==False:\n", + " print(f\"{prmt.test_failed_msg} Was supposed to be a float or np.float64. Instead was type: %s\" % type(test_input))\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_for_negative_values(df, parent_fn):\n", + " \"\"\"\n", + " Checks whether specified df (usually all_accts) has any rows with negative values.\n", + " \n", + " If so, it finds values of acct_name for those rows. \n", + " \n", + " Fn can show the full account for those with negative rows (need to make sure that code is uncommented).\n", + " \"\"\"\n", + " \n", + " if prmt.show_neg_msg == True:\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " neg_cut_off = prmt.neg_cut_off\n", + "\n", + " non_deficits = df.loc[df.index.get_level_values('status')!='deficit']\n", + " neg_values = non_deficits.loc[non_deficits['quant']<-abs(neg_cut_off)]\n", + "\n", + " if len(neg_values) > 0:\n", + " print()\n", + " print(\"Warning\" + f\"! 
During {parent_fn}, negative values in all_accts (other than deficits).\")\n", + " print(neg_values)\n", + " print()\n", + "\n", + " # get acct_names with negative values\n", + " neg_acct_names = neg_values.index.get_level_values('acct_name').unique().tolist()\n", + "\n", + " for neg_acct_name in neg_acct_names:\n", + " print(f\"There were negative values in acct_name {neg_acct_name}; here's the full account:\")\n", + " print(df.loc[df.index.get_level_values('acct_name')==neg_acct_name])\n", + "\n", + " else:\n", + " pass\n", + " \n", + " else: # prmt.show_neg_msg == False\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_during_transfer(all_accts, all_accts_sum_init, remove_name):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # check for conservation of instruments within all_accts\n", + " all_accts_end_sum = all_accts['quant'].sum()\n", + " diff = all_accts_end_sum - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg}: In {inspect.currentframe().f_code.co_name}, instruments were not conserved. 
Diff: %s\" % diff)\n", + " print(f\"Was for df named {remove_name}\")\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_simple(df, df_sum_init, parent_fn):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + " \n", + " df_sum_final = df['quant'].sum()\n", + " diff = df_sum_final - df_sum_init\n", + " \n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in {parent_fn}\")\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_against_full_budget(all_accts, juris, parent_fn):\n", + " \"\"\"additional conservation check, using total allowance budget\"\"\"\n", + "\n", + " CA_cap = prmt.CA_cap\n", + " QC_cap = prmt.QC_cap\n", + " \n", + " if juris == 'CA':\n", + " if cq.date <= quarter_period('2017Q4'):\n", + " budget = CA_cap.loc[2013:2020].sum()\n", + " \n", + " # additional allowances vintage 2021-2030 assumed to have been added at start of 2018Q1 (Jan 1)\n", + " # (all we know for sure is they were first included in 2017Q4 CIR)\n", + " # budget will be the same through 2030Q4, as far as we know at this point\n", + " # but in some future year, perhaps 2028, post-2030 allowances would likely be added to the system\n", + " elif cq.date >= quarter_period('2018Q1') and cq.date <= quarter_period('2030Q4'):\n", + " budget = CA_cap.loc[2013:2030].sum()\n", + " else:\n", + " print(\"Need to fill in CA budget after 2030, if those in Oct 2017 regs are retained.\")\n", + " \n", + " elif juris == 'QC':\n", + " if cq.date == quarter_period('2013Q4'):\n", + " budget = QC_cap.loc[2013:2020].sum()\n", + "\n", + " elif cq.date >= quarter_period('2014Q1') and cq.date <= quarter_period('2017Q4'):\n", + " # add Early 
Action allowances to budget\n", + "            budget = QC_cap.loc[2013:2020].sum() + 2.040026 # units: MMTCO2e\n", + "\n", + "        # additional allowances vintage 2021-2030 assumed to have been added at start of 2018Q1 (Jan 1)\n", + "        # (all we know for sure is they were first included in 2017Q4 CIR)\n", + "        # budget will be the same through 2030Q4, as far as we know at this point\n", + "        # but in some future year, perhaps 2028, post-2030 allowances would likely be added to the system\n", + "        elif cq.date >= quarter_period('2018Q1') and cq.date <= quarter_period('2030Q4'):\n", + "            budget = QC_cap.loc[2013:2030].sum() + 2.040026 # units: MMTCO2e\n", + "\n", + "        else:\n", + "            print(\"Error\" + \"! QC budget not defined after 2030.\")\n", + "\n", + "    elif juris == 'ON':\n", + "        # only represent net flow from ON into CA-QC; \n", + "        # these may be any juris, but for purposes of tracking they are recorded as juris 'ON'\n", + "        \n", + "        if cq.date < quarter_period('2018Q2'):\n", + "            budget = 0\n", + "        \n", + "        # as noted in 2018Q2 CIR: \n", + "        # \"As of [July 3, 2018], there are 13,186,967 more compliance instruments held in California and Québec \n", + "        # accounts than the total number of compliance instruments issued by those two jurisdictions alone.\"\n", + "        \n", + "        elif cq.date >= quarter_period('2018Q2') and cq.date <= quarter_period('2030Q4'):\n", + "            # net excess of instruments held in CA-QC accounts, due to flows from ON (per 2018Q2 CIR)\n", + "            budget = 13.186967 # units: MMTCO2e\n", + "        \n", + "    else:\n", + "        print(\"Error\" + f\"! 
Some other juris not in list; juris is: {juris}\")\n", + "    \n", + "    diff = all_accts['quant'].sum() - budget\n", + "\n", + "    if abs(diff) > 1e-7:\n", + "        print(f\"{prmt.test_failed_msg} Allowances not conserved in {parent_fn}.\")\n", + "        print(f\"(Final value minus full budget ({budget} M) was: {diff} M.)\")\n", + "        # print(f\"Was for auct_type: {auct_type}\")\n", + "    else:\n", + "        pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_snaps_end_Q4_sum():\n", + "    \"\"\"Check for modifications to snaps_end_Q4 by checking sum.\"\"\"\n", + "    \n", + "    if prmt.snaps_end_Q4['quant'].sum() == prmt.snaps_end_Q4_sum:\n", + "        # no change to sum of prmt.snaps_end_Q4; equal to original sum calculated in model initialization\n", + "        pass\n", + "    else:\n", + "        print(f\"{prmt.test_failed_msg} snaps_end_Q4 sum has changed from initial value.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MAIN FUNCTIONS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_annual_budgets_in_alloc_hold(all_accts, ser):\n", + "    \"\"\"\n", + "    Creates allowances for each annual budget, in the Allocation Holding account (alloc_hold).\n", + "    \n", + "    Does this for each juris.\n", + "    \"\"\"\n", + "    \n", + "    logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "    \n", + "    df = pd.DataFrame(ser)\n", + "    \n", + "    if len(df.columns)==1:\n", + "        df.columns = ['quant']\n", + "    else:\n", + "        print(\"Error\" + \"! 
In convert_cap_to_MI, len(df.columns==1) was False.\")\n", + " \n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + "\n", + " # metadata for cap\n", + " df['acct_name'] = 'alloc_hold'\n", + " df['juris'] = ser.name.split('_')[0]\n", + " df['inst_cat'] = 'cap'\n", + " # vintage already assigned above\n", + " df['auct_type'] = 'n/a'\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['date_level'] = prmt.NaT_proxy\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + " \n", + " df = df.set_index(prmt.standard_MI_names)\n", + " \n", + " all_accts = all_accts.append(df)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer__from_alloc_hold_to_specified_acct(all_accts, to_acct_MI, vintage_start, vintage_end): \n", + " \"\"\"\n", + " Transfers allocations from allocation holding account (alloc_hold) to other accounts.\n", + " \n", + " Works for APCR (to APCR_acct), VRE (to VRE_acct), and advance (to auct_hold).\n", + " \n", + " Destination account is contained in to_acct_MI metadata.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " vintage_range = range(vintage_start, vintage_end+1)\n", + " \n", + " if prmt.run_tests == True:\n", + " test_cols_and_indexes_before_transfer(to_acct_MI) \n", + " \n", + " # change column name of to_acct_MI, but only if to_acct_MI is a df with one column and MultiIndex\n", + " # rename column of to_acct_MI, whatever it is, to 'to_acct_MI_quant'\n", + " # filter to vintages specified\n", + " to_acct_MI.columns = ['quant']\n", + " to_acct_MI_in_vintage_range = 
to_acct_MI[to_acct_MI.index.get_level_values('vintage').isin(vintage_range)]\n", + " \n", + " # ~~~~~~~~~~~~~~~`\n", + " # create df named remove, which is negative of to_acct_MI; rename column\n", + " remove = -1 * to_acct_MI_in_vintage_range.copy()\n", + "\n", + " # set indices in remove df (version of to_acct_MI) to be same as from_acct\n", + " mapping_dict = {'acct_name': 'alloc_hold', \n", + " 'inst_cat': 'cap', \n", + " 'auct_type': 'n/a', \n", + " 'newness': 'n/a', \n", + " 'status': 'n/a'}\n", + " remove = multiindex_change(remove, mapping_dict)\n", + "\n", + " # ~~~~~~~~~~~~~~~~\n", + " # if APCR, sum over all vintages, change vintage to 2200 (proxy for non-vintage)\n", + " # then groupby sum to combine all into one set\n", + " to_acct_name = to_acct_MI.index.get_level_values('acct_name').unique().tolist()\n", + " \n", + " if to_acct_name == ['APCR_acct']: \n", + " mapping_dict = {'vintage': 2200}\n", + " to_acct_MI_in_vintage_range = multiindex_change(to_acct_MI_in_vintage_range, mapping_dict)\n", + " to_acct_MI_in_vintage_range = to_acct_MI_in_vintage_range.groupby(level=prmt.standard_MI_names).sum()\n", + "\n", + " elif len(to_acct_name) != 1:\n", + " print(\"Error\" + \"! 
There was more than one to_acct_name in df that was intended to be for APCR_acct only.\")\n", + " \n", + " else:\n", + " pass\n", + " # ~~~~~~~~~~~~~~~\n", + "\n", + " # separate out any rows with negative values\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " \n", + " # combine dfs to subtract from from_acct & add to_acct_MI_1v\n", + " # (groupby sum adds the positive values in all_accts_pos and the neg values in remove)\n", + " all_accts_pos = pd.concat([all_accts_pos, remove, to_acct_MI_in_vintage_range], sort=True)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine pos & neg\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True:\n", + " name_of_allowances = to_acct_MI.index.get_level_values('inst_cat').unique().tolist()[0]\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_CA_alloc__from_alloc_hold(all_accts, to_acct_MI):\n", + " \"\"\"\n", + " Moves allocated allowances from alloc_hold into private accounts (for non-consign) or limited_use (for consign).\n", + " \n", + " The destination account is specified in metadata (index level 'acct_name') in df to_acct_MI.\n", + " \n", + " Runs at the end of each year (except for anomalous years).\n", + " \n", + " Only processes one vintage at a time; vintage is cq.date.year + 1\n", + " \n", + " \"\"\"\n", + " 
\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " if prmt.run_tests == True:\n", + " test_cols_and_indexes_before_transfer(to_acct_MI)\n", + "\n", + " # change column name of to_acct_MI, but only if to_acct_MI is a df with one column and MultiIndex\n", + " # rename column of to_acct_MI, whatever it is, to 'to_acct_MI_quant'\n", + " to_acct_MI.columns = ['quant']\n", + " \n", + " # filter to specific vintage\n", + " to_acct_MI_1v = to_acct_MI[to_acct_MI.index.get_level_values('vintage')==(cq.date.year+1)]\n", + " \n", + " # create df named remove, which is negative of to_acct_MI; rename column\n", + " remove = -1 * to_acct_MI_1v\n", + "\n", + " # set indices in remove df (version of to_acct_MI) to be same as from_acct\n", + " mapping_dict = {'acct_name': 'alloc_hold', \n", + " 'inst_cat': 'cap', \n", + " 'auct_type': 'n/a', \n", + " 'newness': 'n/a', \n", + " 'status': 'n/a'}\n", + " remove = multiindex_change(remove, mapping_dict)\n", + "\n", + " # separate out any rows with negative values\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " \n", + " # combine dfs to subtract from from_acct & add to_acct_MI_1v\n", + " # (groupby sum adds the positive values in all_accts_pos and the neg values in remove)\n", + " all_accts_pos = pd.concat([all_accts_pos, remove, to_acct_MI_1v], sort=False)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine pos & neg\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " if prmt.run_tests == True:\n", + " inst_cat_to_acct = str(to_acct_MI.index.get_level_values('inst_cat').unique())\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, inst_cat_to_acct)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, 
all_accts_sum_init, parent_fn)\n", + "        test_for_duplicated_indices(all_accts, parent_fn)\n", + "        test_for_negative_values(all_accts, parent_fn)\n", + "    \n", + "    logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "    \n", + "    return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer__from_VRE_acct_to_retirement(all_accts): \n", + "    \"\"\"\n", + "    Transfers any VRE allowances retired in the current quarter (cq.date) from VRE_acct to the retirement account.\n", + "    \n", + "    Retirements are drawn from the historical record in prmt.VRE_retired.\n", + "    \n", + "    If there are no VRE retirements recorded for cq.date, all_accts is returned unchanged.\n", + "    \"\"\"\n", + "    \n", + "    logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "    \n", + "    VRE_retired = prmt.VRE_retired\n", + "    \n", + "    all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + "    try:\n", + "        VRE_retired_1q = VRE_retired.xs(cq.date, level='CIR_date', drop_level=False)\n", + "\n", + "        VRE_retired_1q = VRE_retired_1q.reset_index()\n", + "\n", + "        # record date of retirement as 'date_level'\n", + "        VRE_retired_1q = VRE_retired_1q.rename(columns={'CIR_date': 'date_level'})\n", + "\n", + "        # create MultiIndex version of VRE_retired_1q, for doing removal and addition to all_accts\n", + "        df = VRE_retired_1q.copy()\n", + "\n", + "        df['acct_name'] = 'VRE_acct'\n", + "        df['juris'] = 'CA'\n", + "        df['auct_type'] = 'n/a'\n", + "        df['inst_cat'] = 'VRE_reserve'\n", + "        # vintage already set (index level 'Vintage')\n", + "        df['newness'] = 'n/a'\n", + "        df['status'] = 'n/a'\n", + "        df['unsold_di'] = prmt.NaT_proxy\n", + "        df['unsold_dl'] = prmt.NaT_proxy\n", + "        df['units'] = 'MMTCO2e'\n", + "\n", + "        # create MultiIndex version\n", + "        VRE_retired_1q_MI = df.set_index(prmt.standard_MI_names)\n", + "\n", + "        to_remove = -1 * VRE_retired_1q_MI.copy()\n", + "        mapping_dict = {'date_level': prmt.NaT_proxy}\n", + "        to_remove = 
multiindex_change(to_remove, mapping_dict)\n", + "\n", + " to_transfer = VRE_retired_1q_MI.copy()\n", + " mapping_dict = {'acct_name': 'retirement', \n", + " 'status': 'retired'}\n", + " to_transfer = multiindex_change(to_transfer, mapping_dict)\n", + " \n", + " # concat with all_accts_pos, groupby sum, recombine with all_accts_neg\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_pos = pd.concat([all_accts_pos, to_remove, to_transfer], sort=True).groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " logging.info(f\"VRE retirement of {to_transfer['quant'].sum()} M.\")\n", + " \n", + " except:\n", + " # no VRE_retired for given date\n", + " pass\n", + " \n", + " \n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'VRE_reserve'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def consign_groupby_sum_in_all_accts(all_accts):\n", + " \"\"\"\n", + " Sums all types of consignment allowances and assigns them inst_cat 'consign'.\n", + " \n", + " This overwrites old values of inst_cat (i.e., 'elec_alloc_IOU', etc.).\n", + " \n", + " Then this sums across the types of consignment allowances, to get a single annual value for consignment.\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of 
allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'\n", + " mask2 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2)\n", + " \n", + " consigned = all_accts.loc[mask]\n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " mapping_dict = {'inst_cat': 'consign'}\n", + " consigned = multiindex_change(consigned, mapping_dict)\n", + " consigned = consigned.groupby(level=prmt.standard_MI_names).sum() \n", + " \n", + " all_accts = consigned.append(remainder)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_consign__from_limited_use_to_auct_hold_2012Q4_and_make_available(all_accts):\n", + " \"\"\"\n", + " Specified quantities in limited_use account of a particular vintage will be moved to auct_hold.\n", + "\n", + " Only for anomalous auction 2012Q4 (CA-only), in which vintage 2013 consignment were sold at current auction.\n", + " \"\"\" \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " qauct_new_avail = prmt.qauct_new_avail\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # look up quantity consigned in 2012Q4 from historical record (consign_2012Q4)\n", + " # this anomalous fn runs when cq.date = 2012Q4\n", + " quarter_2012Q4 = quarter_period('2012Q4')\n", + " consign_2012Q4_vintage = 2013\n", + " consign_2012Q4 = qauct_new_avail.at[('auct_hold', 'CA', 
'current', 'consign', consign_2012Q4_vintage, \n", + " 'new', 'not_avail', quarter_2012Q4, prmt.NaT_proxy, prmt.NaT_proxy),\n", + " 'quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " test_if_value_is_float_or_np_float64(consign_2012Q4)\n", + " \n", + " # get allowances in limited_use, inst_cat=='consign', for specified vintage\n", + " # and calculate sum of that set\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'\n", + " mask2 = all_accts.index.get_level_values('inst_cat')=='consign'\n", + " mask3 = all_accts.index.get_level_values('vintage')==cq.date.year+1\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " consign_not_avail = all_accts.loc[mask]\n", + " quant_not_avail = consign_not_avail['quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST:\n", + " if len(consign_not_avail) != 1:\n", + " print(f\"{prmt.test_failed_msg} Expected consign_not_avail to have 1 row. Here's consign_not_avail:\")\n", + " print(consign_not_avail)\n", + " print(\"Here's all_accts.loc[mask1] (limited_use):\")\n", + " print(all_accts.loc[mask1])\n", + " # END OF TEST\n", + " \n", + " # split consign_not_avail to put only the specified quantity into auct_hold; \n", + " # rest stays in limited_use\n", + " consign_avail = consign_not_avail.copy()\n", + " \n", + " # consign_avail and consign_not_avail have same index (before consign avail index updated below)\n", + " # use this common index for setting new values for quantity in each df\n", + " # (only works because each df is a single row, as tested for above)\n", + " index_first_row = consign_avail.index[0]\n", + " \n", + " # set quantity in consign_avail, using consign_2012Q4 (input/argument for this function)\n", + " consign_avail.at[index_first_row, 'quant'] = consign_2012Q4\n", + " \n", + " # update metadata: put into auct_hold & make them available in cq.date (2012Q4)\n", + " # this fn does not make these allowances available; this will occur in separate fn, at start of 
cq.date\n", + "    mapping_dict = {'acct_name': 'auct_hold', \n", + "                    'newness': 'new', \n", + "                    'date_level': cq.date, \n", + "                    'status': 'available'}\n", + "    consign_avail = multiindex_change(consign_avail, mapping_dict)\n", + "\n", + "    # update quantity in consign_not_avail, to remove those consigned for next_q\n", + "    consign_not_avail.at[index_first_row, 'quant'] = quant_not_avail - consign_2012Q4\n", + "    \n", + "    # get remainder of all_accts\n", + "    all_accts_remainder = all_accts.loc[~mask]\n", + "    \n", + "    # recombine to get all_accts again\n", + "    all_accts = pd.concat([consign_avail, consign_not_avail, all_accts_remainder], sort=True)\n", + "    \n", + "    if prmt.run_tests == True:\n", + "        parent_fn = str(inspect.currentframe().f_code.co_name)\n", + "        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + "        test_for_duplicated_indices(all_accts, parent_fn)\n", + "        test_for_negative_values(all_accts, parent_fn)\n", + "    \n", + "    logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "    \n", + "    return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_qauct_new_avail():\n", + "    \"\"\"\n", + "    Create new df qauct_new_avail.\n", + "    \n", + "    Runs within initialize_model_run (so can't use modelInitialization)\n", + "    \"\"\"\n", + "    \n", + "    logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + "    qauct_hist = prmt.qauct_hist\n", + "    \n", + "    # create df: only newly available\n", + "    df = qauct_hist.copy()\n", + "    df = df.drop(['market', 'Available', 'Sold', 'Unsold' , 'Redesignated'], axis=1) \n", + "    df = df.rename(columns={'Newly available': 'quant'})\n", + "\n", + "    # add other metadata rows to make it include all prmt.standard_MI_names\n", + "    df['acct_name'] = 'auct_hold'\n", + "    df['newness'] = 'new'\n", + "    df['status'] = 'not_avail' # will become available, but isn't yet\n", + "    df['unsold_di'] = 
prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + "\n", + " df = df.set_index(prmt.standard_MI_names)\n", + " \n", + " # rename & copy (to avoid problem with slices)\n", + " qauct_new_avail = df.copy()\n", + " \n", + " prmt.qauct_new_avail = qauct_new_avail\n", + " # no return; func sets object attribute prmt.qauct_new_avail" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_CA_auctions(all_accts):\n", + " \"\"\"\n", + " Initializes first CA auction in 2012Q4 (also first for any juris in WCI market).\n", + " \n", + " This auction was anomalous, in that:\n", + " 1. There was only this single auction in 2012.\n", + " 2. The 2012Q4 current auction had available only consignment allowances (and no state-owned), \n", + " and they were a vintage ahead (2013).\n", + " 3. The 2012Q4 advance auction had available all vintage 2015 allowances at once.\n", + " \n", + " This function runs only once in each model run.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create CA allowances v2013-v2020, put into alloc_hold\n", + " all_accts = create_annual_budgets_in_alloc_hold(all_accts, prmt.CA_cap.loc[2013:2020])\n", + " \n", + " # pre-test for conservation of allowances\n", + " # (sum_init has to be after creation of allowances in previous step)\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " # transfer APCR allowances out of alloc_hold, into APCR_acct (for vintages 2013-2020)\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.CA_APCR_MI, 2013, 2020)\n", + "\n", + " # transfer advance into auct_hold\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.CA_advance_MI, 2013, 2020)\n", + "\n", + " # transfer VRE allowances out of alloc_hold, into VRE_acct (for vintages 2013-2020)\n", + " all_accts = 
transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.VRE_reserve_MI, 2013, 2020)\n", + "\n", + " # allocations:\n", + " # transfer alloc non-consign into ann_alloc_hold & alloc consign into limited_use \n", + " # alloc v2013 transferred in 2012Q4, before Q4 auction (same pattern as later years)\n", + " # transfer out of alloc_hold to ann_alloc_hold or limited_use\n", + " # (appropriate metadata is assigned to each set of allowances by convert_ser_to_df_MI_alloc)\n", + " \n", + " # transfer all the allocations at once (for one vintage)\n", + " # (fn only processes one vintage at a time)\n", + " all_accts = transfer_CA_alloc__from_alloc_hold(all_accts, prmt.CA_alloc_MI_all)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~\n", + " # CURRENT AUCTION (2012Q4 anomaly):\n", + " # for current auction in 2012Q4, no state-owned allowances; only consign\n", + " # consign annual amount in limited_use: get rid of distinctions between types of consign & groupby sum \n", + " all_accts = consign_groupby_sum_in_all_accts(all_accts)\n", + "\n", + " # put consignment allowances into auct_hold & make available\n", + " all_accts = transfer_consign__from_limited_use_to_auct_hold_2012Q4_and_make_available(all_accts)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~\n", + " # ADVANCE AUCTION (2012Q4 anomaly):\n", + " # remember: all vintage 2015 allowances were available at once in this auction\n", + " # get all allowances aside for advance in auct_hold that are vintage 2015 (cq.date.year+3)\n", + " adv_new_mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " adv_new_mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " adv_new_mask3 = all_accts.index.get_level_values('vintage')==(cq.date.year+3)\n", + " adv_new_mask = (adv_new_mask1) & (adv_new_mask2) & (adv_new_mask3)\n", + " adv_new = all_accts.loc[adv_new_mask]\n", + " all_accts_remainder = all_accts.loc[~adv_new_mask]\n", + " \n", + " # for this anomalous advance auction, all of these allowances were available 
in one auction\n", + " # update metadata: change 'date_level' to '2012Q4' & status' to 'available'\n", + " mapping_dict = {'date_level': quarter_period('2012Q4'),\n", + " 'status': 'available'}\n", + " adv_new = multiindex_change(adv_new, mapping_dict)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~\n", + " # recombine to create new version of all_accts\n", + " all_accts = pd.concat([adv_new, all_accts_remainder], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_CA_quarterly(all_accts):\n", + "\n", + " \"\"\"\n", + " Function that is used in the loop for each quarter, for each juris.\n", + " \n", + " Applies functions defined earlier, as well as additional rules\n", + " \n", + " Order of sales for each jurisdiction set by jurisdiction-specific functions called within process_quarter.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " latest_historical_year_cur = prmt.qauct_new_avail.index.get_level_values('date_level').year.max()\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # object \"scenario\" holds the data for a particular scenario in various attributes (scenario_CA.avail_accum, etc.)\n", + "\n", + " # START-OF-QUARTER STEPS (INCLUDING START-OF-YEAR) ***********************************************\n", + " \n", + " # process retirements for EIM and bankruptcies (CA only)\n", + " if cq.date.quarter == 4:\n", + " \n", + " # process EIM 
Outstanding in Q4, prior to auctions (required to be done before Nov 1 compliance deadline)\n", + " # retire any allowances to account for EIM Outstanding Emissions\n", + " all_accts = retire_for_EIM_outstanding(all_accts)\n", + "\n", + " # process bankruptcy retirements at same time\n", + " # retire any allowances to account for bankruptcies\n", + " all_accts = retire_for_bankruptcy(all_accts)\n", + "\n", + " else: # cq.date.quarter != 4\n", + " pass\n", + "\n", + " \n", + " # --------------------------------------------\n", + " \n", + " if cq.date.quarter == 1:\n", + " # start-of-year (Jan 1)\n", + " # on Jan 1 each year, all alloc in ann_alloc_hold are transferred to comp_acct or gen_acct \n", + " all_accts = transfer_CA_alloc__from_ann_alloc_hold_to_general(all_accts)\n", + "\n", + " if cq.date.year <= latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): \n", + " # for current auction, state-owned, vintage==cq.date.year, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, 'CA')\n", + "\n", + " # for current auction, state-owned, sum newly avail & unsold adv, upsample, assign 'date_level'\n", + " all_accts = cur_upsample_avail_state_owned_historical(all_accts, 'CA')\n", + "\n", + " elif cq.date.year > latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): \n", + " # for current auction, state-owned, vintage==cq.date.year, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, 'CA')\n", + "\n", + " # for current auction, state-owned, sum newly avail & unsold adv, upsample, assign date_level\n", + " all_accts = cur_upsample_avail_state_owned_projection(all_accts, 'CA')\n", + "\n", + " if cq.date.year >= 2013 and cq.date.year <= 2027: \n", + " # start-of-year (Jan 1???): upsample of allowances for advance auction (before Q1 auctions)\n", + " # note that the model does not attempt to simulate advance 
auctions for vintages after 2027\n", + " all_accts = upsample_advance_all_accts(all_accts)\n", + " else:\n", + " pass\n", + "\n", + "# # for Q1, take snap (~Jan 5):\n", + "# # after transferring CA alloc out of ann_alloc_hold (Jan 1)\n", + "# # and before Q1 auctions (~Feb 15) \n", + "# take_snapshot_CIR(all_accts, 'CA')\n", + " \n", + " else: # cq.date.quarter != 1\n", + "# # for Q2-Q4, take snap before auction (no special steps at the start of each quarter)\n", + "# take_snapshot_CIR(all_accts, 'CA')\n", + " pass\n", + " \n", + " # END OF START-OF-QUARTER STEPS\n", + "\n", + " # ADVANCE AUCTIONS ********************************************************\n", + " # process advance auctions through vintage 2030, which occur in years through 2027\n", + " logging.info(f\"within {inspect.currentframe().f_code.co_name}, start of advance auction\")\n", + " \n", + " if cq.date.year <= 2027:\n", + " # ADVANCE AUCTION: MAKE AVAILABLE \n", + " all_accts = CA_state_owned_make_available(all_accts, 'advance')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_CA.avail_accum = avail_accum_append(all_accts, scenario_CA.avail_accum, 'advance')\n", + "\n", + " # redesignation of unsold advance to later advance auction\n", + " all_accts = redesignate_unsold_advance_as_advance(all_accts, 'CA')\n", + "\n", + " # ADVANCE AUCTION: PROCESS SALES - CA ONLY AUCTIONS\n", + " all_accts = process_auction_adv_all_accts(all_accts, 'CA')\n", + "\n", + " else: # cq.date.year > 2027\n", + " pass\n", + " \n", + " # CURRENT AUCTION ********************************************************\n", + " logging.info(\"within process_quarter, start of current auction\")\n", + " \n", + " # CA state-owned current: make available for cq.date\n", + " all_accts = CA_state_owned_make_available(all_accts, 'current')\n", + "\n", + " # each quarter, prior to auction\n", + " # consignment: for all allowances in auct_hold with date_level == cq.date, change status to 'available'\n", + 
" all_accts = consign_make_available_incl_redes(all_accts)\n", + "\n", + " # redesignate any unsold allowances to auction (if any unsold, and if right conditions)\n", + " all_accts = redesignate_unsold_current_auct(all_accts, 'CA')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_CA.avail_accum = avail_accum_append(all_accts, scenario_CA.avail_accum, 'current')\n", + "\n", + " # process auction\n", + " all_accts = process_auction_cur_CA_all_accts(all_accts)\n", + " \n", + " \n", + " # FINISHING AFTER AUCTION: ***************************************************************\n", + " \n", + " # use new version of fn for updating reintro eligibility\n", + " update_reintro_eligibility('CA')\n", + " \n", + " if cq.date.quarter == 4: \n", + " # Q4 PROCESSING AFTER AUCTION **********************************************\n", + " # this includes transfer of consigned portion of alloc into limited_use\n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: start\")\n", + " \n", + " # end-of-year: move advance unsold to current auction\n", + " all_accts = adv_unsold_to_cur_all_accts(all_accts) \n", + " \n", + " # check for unsold from current auctions, to retire for bankruptcies\n", + " # TO DO: ADD NEW FUNCTION\n", + " \n", + " # note: the transfer allocation step below moves annual consigned allowances into limited_use\n", + " # this needs to happen before allowances for Q1 of next year can be moved from limited_use to auct_hold\n", + " if cq.date.year >= 2013: \n", + " # transfer allocations (consigned & not consigned)\n", + " \n", + " # transfer all allocations\n", + " # (fn only transfers 1 vintage at a time, vintage == cq.date.year + 1)\n", + " all_accts = transfer_CA_alloc__from_alloc_hold(all_accts, prmt.CA_alloc_MI_all)\n", + "\n", + " # disabled func below; at the moment, it is only set up for QC\n", + " # all_accts = transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, 'CA')\n", + " \n", + " # for 
consign, groupby sum to get rid of distinctions between types (IOU, POU)\n", + " all_accts = consign_groupby_sum_in_all_accts(all_accts)\n", + "\n", + " else:\n", + " # closing \"if cq.date.year >= 2013:\"\n", + " # end of transfer allocation process\n", + " pass\n", + " \n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: end\")\n", + " \n", + " else: \n", + " # closing \"if cq.date.quarter == 4:\"\n", + " pass\n", + " \n", + " # END-OF-QUARTER (EVERY QUARTER) *****************************************\n", + " logging.info(\"end-of-quarter processing (every quarter) - start\")\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " # check for unsold from current auctions, to roll over to APCR\n", + " all_accts = transfer_unsold__from_auct_hold_to_APCR(all_accts)\n", + "\n", + " # check for VRE retirement (historical data only; assumes no future VRE retirements)\n", + " all_accts = transfer__from_VRE_acct_to_retirement(all_accts)\n", + "\n", + " if cq.date < quarter_period('2030Q4'):\n", + " # transfer consignment allowances into auct_hold, for auction in following quarter\n", + " # (each quarter, after auction)\n", + " # have to do this *after* end-of-year transfer of consignment from alloc_hold to limited_use\n", + " all_accts = transfer_consign__from_limited_use_to_auct_hold(all_accts)\n", + " else:\n", + " # no projection for what happens after 2030Q4, so no transfer to occur in 2030Q4\n", + " pass\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CLEANUP OF all_accts (each quarter)\n", + " # get rid of fractional allowances, zeros, and NaN\n", + " logging.info(\"cleanup of all_accts\")\n", + " \n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + " all_accts = all_accts.dropna()\n", + " # END OF CLEANUP OF all_accts\n", + " \n", + " # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # take snap_end at end of each quarter, add to list scenario_CA.snaps_end\n", + " take_snapshot_end(all_accts, 'CA')\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " logging.info(\"end-of-quarter processing (every quarter) - end\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn) \n", + " test_conservation_against_full_budget(all_accts, 'CA', parent_fn) \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)\n", + "# end of process_CA_quarterly" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_QC_quarterly(all_accts):\n", + "\n", + " \"\"\"\n", + " Function that is used in the loop for each quarter, for each juris.\n", + " \n", + " Applies functions defined earlier, as well as additional rules\n", + " \n", + " Order of sales for each jurisdiction set by jurisdiction-specific functions called within process_quarter.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " latest_historical_year_cur = prmt.qauct_new_avail.index.get_level_values('date_level').year.max()\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # object \"scenario\" holds the data for a particular scenario in various attributes (scenario_QC.avail_accum, etc.)\n", + "\n", + " # START-OF-QUARTER STEPS (INCLUDING START-OF-YEAR) ***********************************************\n", + "\n", + " if cq.date.quarter == 1:\n", + " # start-of-year (Jan 1???): transfer of allowances for current 
auction\n", + "\n", + " if cq.date.year <= latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): for current auction, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, 'QC')\n", + "\n", + " # calculate\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, 'QC')\n", + "\n", + " all_accts = cur_upsample_avail_state_owned_historical(all_accts, 'QC')\n", + "\n", + " elif cq.date.year > latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): for current auction, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, 'QC')\n", + "\n", + " # start-of-year (Jan 1???): for current auction, sum newly avail & unsold adv, upsample, assign date_level\n", + " all_accts = cur_upsample_avail_state_owned_projection(all_accts, 'QC')\n", + "\n", + "\n", + " if cq.date.year >= 2014 and cq.date.year <= 2027: \n", + " # start-of-year (Jan 1???): upsample of allowances for advance auction (before Q1 auctions)\n", + " # note that the model does not attempt to simulate advance auctions for vintages after 2027\n", + " all_accts = upsample_advance_all_accts(all_accts)\n", + " else:\n", + " pass\n", + "\n", + "# # for Q1, take snap (~Jan 5):\n", + "# # before transferring QC alloc out of ann_alloc_hold (~Jan 15)\n", + "# # and before Q1 auctions (~Feb 15)\n", + "# take_snapshot_CIR(all_accts, 'QC')\n", + "\n", + " # start-of-year (Jan 15): transfer QC allocation, initial quantity (75% of estimated ultimate allocation) \n", + " all_accts = transfer_QC_alloc_init__from_alloc_hold(all_accts)\n", + " \n", + " else: # cq.date.quarter != 1\n", + "# # for Q2-Q4, take snap before auction (no special steps at the start of each quarter)\n", + "# take_snapshot_CIR(all_accts, 'QC')\n", + " pass\n", + " \n", + " # END OF START-OF-QUARTER STEPS\n", + "\n", + " # ADVANCE AUCTIONS 
********************************************************\n", + " # process advance auctions through vintage 2030, which occur in years through 2027\n", + " logging.info(f\"within {inspect.currentframe().f_code.co_name}, start of advance auction\")\n", + " \n", + " if cq.date.year <= 2027:\n", + " # ADVANCE AUCTION: MAKE AVAILABLE \n", + " all_accts = QC_state_owned_make_available(all_accts, 'advance')\n", + "\n", + " # for QC, no redesignation of unsold advance as advance\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_QC.avail_accum = avail_accum_append(all_accts, scenario_QC.avail_accum, 'advance')\n", + "\n", + " # ADVANCE AUCTION: PROCESS SALES - QC ONLY AUCTIONS\n", + " all_accts = process_auction_adv_all_accts(all_accts, 'QC')\n", + "\n", + " else: # cq.date.year > 2027\n", + " pass\n", + " \n", + " # CURRENT AUCTION ********************************************************\n", + " logging.info(\"within process_quarter, start of current auction\")\n", + " \n", + " # QC state-owned current: make available for cq.date\n", + " all_accts = QC_state_owned_make_available(all_accts, 'current') \n", + "\n", + " all_accts = redesignate_unsold_current_auct(all_accts, 'QC')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_QC.avail_accum = avail_accum_append(all_accts, scenario_QC.avail_accum, 'current')\n", + "\n", + " # process auction\n", + " all_accts = process_auction_cur_QC_all_accts(all_accts)\n", + " \n", + " # FINISHING AFTER AUCTION: ***************************************************************\n", + " \n", + " # use new version of fn for updating reintro eligibility\n", + " update_reintro_eligibility('QC')\n", + " \n", + " if cq.date.quarter == 4: \n", + " # Q4 PROCESSING AFTER AUCTION **********************************************\n", + " # this includes transfer of consigned portion of alloc into limited_use\n", + " logging.info(f\"for {cq.date}, for QC, Q4 processing after 
auction: start\")\n", + " \n", + " # end-of-year: move advance unsold to current auction\n", + " all_accts = adv_unsold_to_cur_all_accts(all_accts)\n", + " \n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: end\")\n", + " \n", + " else: \n", + " # closing \"if cq.date.quarter == 4:\"\n", + " pass\n", + " \n", + " # END-OF-QUARTER (EVERY QUARTER) *****************************************\n", + " logging.info(\"end-of-quarter processing (every quarter) - start\")\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " # September true-ups definitely after auction\n", + " # May auctions might be before auction; not clear; will assume they occur after auctions as well\n", + " # check for QC allocation true-ups; if any, transfer from alloc_hold to gen_acct \n", + " all_accts = transfer_QC_alloc_trueups__from_alloc_hold(all_accts)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CLEANUP OF all_accts (each quarter)\n", + " # get rid of fractional allowances, zeros, and NaN\n", + " logging.info(\"cleanup of all_accts\")\n", + " \n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + " all_accts = all_accts.dropna()\n", + " # END OF CLEANUP OF all_accts\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # take snap_end at end of each quarter, add to list scenario_QC.snaps_end\n", + " take_snapshot_end(all_accts, 'QC')\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " logging.info(\"end-of-quarter processing (every quarter) - end\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, 
def take_snapshot_CIR(all_accts, juris):
    """
    Snapshot all_accts for later comparison against the Compliance Instrument Report (CIR).

    Regulators label each CIR with the quarter *before* the one in which it is
    published (e.g., the snapshot taken early in 2015Q1 is the "2014Q4" CIR).
    This function follows that practice: the copy taken early in cq.date is
    tagged with previous_q, one quarter before cq.date.

    No return value; appends to scenario_CA.snaps_CIR or scenario_QC.snaps_CIR.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # label the snapshot with the quarter one step before the current quarter
    current_q_start = pd.to_datetime(f'{cq.date.year}Q{cq.date.quarter}')
    previous_q = (current_q_start - DateOffset(months=3)).to_period('Q')

    snapshot = all_accts.copy()
    snapshot['snap_q'] = previous_q

    if juris == 'CA':
        scenario_CA.snaps_CIR += [snapshot]
    elif juris == 'QC':
        scenario_QC.snaps_CIR += [snapshot]

    logging.info(f"{inspect.currentframe().f_code.co_name} named {previous_q} (end)")

    # no return; updates object attributes
def take_snapshot_end(all_accts, juris):
    """
    Snapshot all_accts at the end of the current quarter (cq.date).

    Enables restarting a scenario later from any quarter's ending state.

    No return value; appends to scenario_CA.snaps_end or scenario_QC.snaps_end.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # copy the full account state and tag it with the quarter just ended
    snapshot = all_accts.copy()
    snapshot['snap_q'] = cq.date

    if juris == 'CA':
        scenario_CA.snaps_end += [snapshot]
    elif juris == 'QC':
        scenario_QC.snaps_end += [snapshot]

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    # no return; updates object attributes
def avail_accum_append(all_accts, avail_accum, auct_type_specified):
    """
    Append allowances available at auction to avail_accum. Runs for advance and current auctions.

    Parameters:
        all_accts: MultiIndex df of allowance accounts; rows with index level
            'status' == 'available' are selected.
        avail_accum: accumulator df collecting available rows across quarters.
        auct_type_specified: 'advance' or 'current' (matched against index
            level 'auct_type').

    Returns a new avail_accum with this quarter's available rows appended.
    """
    
    if prmt.verbose_log == True:
        logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # record allowances available in each auction
    avail_1q = all_accts.loc[(all_accts.index.get_level_values('status')=='available') & 
                             (all_accts.index.get_level_values('auct_type')==auct_type_specified)]

    if prmt.run_tests == True:
        if len(avail_1q) > 0:
            # check that all available allowances have date_level == cq.date
            avail_1q_dates = avail_1q.index.get_level_values('date_level').unique().tolist()
            if avail_1q_dates != [cq.date]:
                # test failures are reported by print, not raised — model keeps running
                print(f"{prmt.test_failed_msg} Inside {inspect.currentframe().f_code.co_name}...")
                print(f"... for {cq.date}, auct_type {auct_type_specified}...")
                print(f"... available had some other date_level than cq.date. Here's avail_1q:")
                print(avail_1q)
                print()
            else:
                pass
        
        elif len(avail_1q) == 0:
            # an empty selection is also treated as a test failure (every auction
            # processed here is expected to have some allowances available)
            print(f"{prmt.test_failed_msg} Inside {inspect.currentframe().f_code.co_name}...")
            print(f"... for {cq.date}, auct_type {auct_type_specified}, available is empty df.")
            print()
    
    # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat([avail_accum, avail_1q]) is the modern equivalent — confirm
    # the pandas version pinned for this model before changing.
    avail_accum = avail_accum.append(avail_1q)
    
    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_for_duplicated_indices(avail_accum, parent_fn)
        test_for_negative_values(avail_accum, parent_fn)

    return(avail_accum)
def CA_state_owned_make_available(all_accts, auct_type):
    """
    Flip state-owned allowances in auct_hold to 'available' when date_level == cq.date.

    Handles both the current auction and the advance auction, chosen by auct_type.

    Returns the updated all_accts df.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start), for auct_type {auct_type}")

    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()

    # rows to flip: positive-quantity, not-yet-available allowances sitting in
    # auct_hold whose date_level matches the current quarter
    idx = all_accts.index
    sel = (
        (idx.get_level_values('acct_name') == 'auct_hold')
        & (idx.get_level_values('auct_type') == auct_type)
        & (idx.get_level_values('date_level') == cq.date)
        & (idx.get_level_values('status') == 'not_avail')
        & (all_accts['quant'] > 0)
    )

    # update status to 'available' on the selected rows
    newly_avail = multiindex_change(all_accts.loc[sel], {'status': 'available'})

    # recombine flipped rows with the untouched remainder
    all_accts = pd.concat([newly_avail, all_accts.loc[~sel]], sort=True)

    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end), for auct_type {auct_type}")

    return all_accts
def redesignate_unsold_advance_as_advance(all_accts, juris):
    """
    Redesignation of unsold allowances from advance auctions, to later advance auctions.
    
    Only applies to CA; QC does not have similar rule.
    
    Based on regulations:
    CA: [CA regs Oct 2017], § 95911(f)(3)
    QC: [QC regs Jan 2018], Section 54. states that unsold advance will only be redesignated as current
    
    If advance allowances remain unsold in one auction, they can be redesignated to a later advance auction.
    But this will only occur after two consecutive auctions have sold out (sold above the floor price).
    If any advance allowances remain unsold at the end of a calendar year, they are retained for 
    redesignation to a later current auction.
    
    Therefore the only situation in which allowances unsold in advance auctions can be redesignated 
    to another advance auction is if:
    1. some allowances are unsold in advance auction in Q1
    2. Q2 and Q3 advance auctions sell out
    And therefore the redesignations can only occur in Q4 of any given year

    Returns the updated all_accts df (unchanged if no redesignation occurred).
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # check sales pct in Q1 of cq.date.year
    df = prmt.auction_sales_pcts_all.copy()
    df = df.loc[(df.index.get_level_values('market').str.contains(juris)) &
                (df.index.get_level_values('auct_type')=='advance')]
    df.index = df.index.droplevel(['market', 'auct_type'])
    
    # if no sales pct for Q1 of cq.date.year (i.e., for CA in 2012, or QC in 2013), then return NaN
    try:
        sales_pct_adv_Q1 = df.at[f"{cq.date.year}Q1"]
    except:
        sales_pct_adv_Q1 = np.NaN
    
    # note: when sales_pct_adv_Q1 is NaN, "NaN < 1" is False, so years with no
    # Q1 data fall through to the final else branch and do nothing
    if sales_pct_adv_Q1 < float(1) and cq.date.quarter == 4: 
        # auction does not sell out
        # then check sales pct in Q2 & Q3:
        sales_pct_adv_Q2 = df.at[f"{cq.date.year}Q2"]
        sales_pct_adv_Q3 = df.at[f"{cq.date.year}Q3"]
        
# # for debugging
# if use_fake_data == True:
# # for 2017Q4, override actual value for adv sales in 2017Q2; set to 100%
# # this allows redesignation of unsold from 2017Q1 in 2017Q4
# if cq.date == quarter_period('2017Q4'):
# sales_pct_adv_Q2 = float(1)
# # end debugging
        
        if sales_pct_adv_Q2 == float(1) and sales_pct_adv_Q3 == float(1):
            # 100% of auction sold; redesignate unsold from Q1, up to limit
            
            # first get the quantity available before reintro
            # get allowances available for cq.date auction
            mask1 = all_accts.index.get_level_values('juris')==juris
            mask2 = all_accts.index.get_level_values('auct_type')=='advance'
            mask3 = all_accts.index.get_level_values('status')=='available'
            mask4 = all_accts.index.get_level_values('date_level')==cq.date
            mask5 = all_accts['quant'] > 0
            mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)
            adv_avail_1j_1q_tot = all_accts[mask]['quant'].sum() 
            
            # max that can be redesignated is 25% of quantity already scheduled to be available
            max_redes_adv = 0.25 * adv_avail_1j_1q_tot
            
            # get unsold from advance Q1, retained in auction holding account
            unsold_adv_Q1 = all_accts.loc[(all_accts.index.get_level_values('acct_name')=='auct_hold') & 
                                          (all_accts.index.get_level_values('auct_type')=='advance') & 
                                          (all_accts.index.get_level_values('unsold_di')==f"{cq.date.year}Q1")]
            
            if len(unsold_adv_Q1) == 1:
                # calculate quantity to be redesignated
                redes_adv_quant = min(max_redes_adv, unsold_adv_Q1['quant'].sum())

                # create new df and specify quantity redesignated
                redes_adv = unsold_adv_Q1.copy()
                redes_adv['quant'] = float(0)
                redes_adv.at[redes_adv.index[0], 'quant'] = redes_adv_quant
                
                # create to_remove df that will subtract from auct_hold
                # (negative quantities cancel against the original rows in the groupby-sum below)
                to_remove = -1 * redes_adv.copy()
                
                # update metadata in redes_adv
                mapping_dict = {'newness': 'redes', 
                                'status': 'available', 
                                'date_level': cq.date}
                redes_adv = multiindex_change(redes_adv, mapping_dict)
                
                # recombine dfs to create redesignated in auct_hold, and to remove quantity from unsold not_avail
                all_accts = pd.concat([all_accts, redes_adv, to_remove], sort=False)
                all_accts = all_accts.groupby(level=prmt.standard_MI_names).sum() 
                
            else: 
                # len(unsold_adv_Q1) != 1
                # NOTE(review): in this error case no redesignation happens; the
                # model continues with all_accts unchanged
                print("Error" + "! Selection of unsold_adv_Q1 did not return a single row; here's unsold_adv_Q1:")
                print(unsold_adv_Q1)
                print()
        else: 
            # end of "if sales_pct_adv_Q2 == float(1) ..."
            pass
    else: 
        # end of "if sales_pct_adv_Q1 < float(1)"
        pass
    
    return(all_accts)
def process_auction_adv_all_accts(all_accts, juris):
    """
    Process advance auctions for the specified jurisdiction.
    
    Calculates quantities sold based on percentages in auction_sales_pcts_all (historical and projected).

    Sold allowances move to gen_acct with status 'sold'; the rest become 'unsold'
    and stay in auct_hold. Returns the updated all_accts df.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    # get allowances available for cq.date auction
    mask1 = all_accts.index.get_level_values('juris') == juris
    mask2 = all_accts.index.get_level_values('auct_type') == 'advance'
    mask3 = all_accts.index.get_level_values('status') == 'available'
    mask4 = all_accts.index.get_level_values('date_level') == cq.date
    mask5 = all_accts['quant'] > 0
    mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)

    adv_avail_1j_1q = all_accts[mask]
    remainder = all_accts[~mask]
    
    # get sales % for advance auctions, for this juris, for cq.date
    # (works for auctions whether linked or unlinked, i.e., CA-only and CA-QC)
    ser = prmt.auction_sales_pcts_all.copy()
    ser = ser.loc[(ser.index.get_level_values('market').str.contains(juris)) &
                  (ser.index.get_level_values('auct_type') =='advance')]
    ser.index = ser.index.droplevel(['market', 'auct_type'])
    sales_pct_adv_1j_1q = ser.at[cq.date]
    
    # for this juris, quantity allowances sold = available quantity * sales_pct_adv_1q
    # NOTE(review): sold_tot_1j_1q is computed but never used below — candidate for removal
    sold_tot_1j_1q = adv_avail_1j_1q['quant'].sum() * sales_pct_adv_1j_1q
    
    # remaining: un-accumulator for all CA sales; initialize here; will be updated repeatedly below
    # if there was redes in previous step, this calculates the quantity using the updated version of adv_avail_1j_1q
    adv_remaining_to_sell_1j_1q = adv_avail_1j_1q['quant'].sum() * sales_pct_adv_1j_1q 
    
    # before iterating, sort index so that redes sell first
    adv_avail_1j_1q = adv_avail_1j_1q.sort_index(level=['newness'], ascending=[False])
    
    if sales_pct_adv_1j_1q == float(1):
        # then all sell:
        adv_sold_1j_1q = adv_avail_1j_1q.copy()
        
        # and none remain in adv_avail_1j_1q
        adv_avail_1j_1q['quant'] = float(0)
        
    elif sales_pct_adv_1j_1q < float(1):
        # then assign limited sales to particular sets of allowances
        # iterate through all rows for available allowances; remove those sold
        
        # create df to collect sold quantities; initialize with zeros
        # sort_index so that earliest vintages are drawn from first
        # NOTE(review): this full sort_index() overrides the newness-descending
        # sort applied just above, so in this branch redes do NOT sell first —
        # confirm that vintage order is the intended priority here
        adv_avail_1j_1q = adv_avail_1j_1q.sort_index()
        
        adv_sold_1j_1q = adv_avail_1j_1q.copy()
        adv_sold_1j_1q['quant'] = float(0)
        
        for row in adv_avail_1j_1q.index:
            in_stock_row = adv_avail_1j_1q.at[row, 'quant']

            sold_from_row_quantity = min(in_stock_row, adv_remaining_to_sell_1j_1q)

            if sold_from_row_quantity > 1e-7:
                # update un-accumulator for jurisdiction
                adv_remaining_to_sell_1j_1q = adv_remaining_to_sell_1j_1q - sold_from_row_quantity 

                # update sold quantity & metadata 
                adv_sold_1j_1q.at[row, 'quant'] = sold_from_row_quantity

                # update adv_avail_1j_1q quantity (but not metadata)
                adv_avail_1j_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity

            else: # sold_from_row_quantity <= 1e-7:
                pass
            # end of "for row in adv_avail_1j_1q.index:"
    else:
        # NOTE(review): if this branch is ever reached (sales_pct NaN),
        # adv_sold_1j_1q is never defined, and multiindex_change below would
        # raise NameError — consider initializing adv_sold_1j_1q as an empty df here
        print("Error" + "! Should not have reached this point; may be that sales_pct_adv_1j_1q == np.NaN")
        pass
    
    # those still remaining in adv_avail_1j_1q are unsold; update status from 'available' to 'unsold'
    adv_unsold_1j_1q = adv_avail_1j_1q
    mapping_dict = {'status': 'unsold'}
    adv_unsold_1j_1q = multiindex_change(adv_unsold_1j_1q, mapping_dict)
    
    # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'
    mapping_dict = {'status': 'sold', 
                    'acct_name': 'gen_acct'}
    adv_sold_1j_1q = multiindex_change(adv_sold_1j_1q, mapping_dict)
    
    # filter out any rows with zeros or fractional allowances
    adv_sold_1j_1q = adv_sold_1j_1q.loc[(adv_sold_1j_1q['quant']>1e-7) | (adv_sold_1j_1q['quant']<-1e-7)]
    adv_unsold_1j_1q = adv_unsold_1j_1q.loc[(adv_unsold_1j_1q['quant']>1e-7) | (adv_unsold_1j_1q['quant']<-1e-7)]
    
    # recombine
    all_accts = pd.concat([adv_sold_1j_1q, 
                           adv_unsold_1j_1q, 
                           remainder], 
                          sort=False)
    # clean-up
    all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]

    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")
    
    return(all_accts)
def consign_make_available_incl_redes(all_accts):
    """
    Set the status of all consigned allowances in auct_hold to 'available'.

    Runs at the start of each quarter, before that quarter's auction.

    Allowances that went unsold in an earlier quarter (unsold_di is set) are
    additionally redesignated: 'newness' becomes 'redes' and 'date_level' is
    moved up to the current quarter (cq.date).

    Returns the updated all_accts df.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()

    # all consigned allowances held in the auction holding account
    idx = all_accts.index
    consign_sel = ((idx.get_level_values('acct_name') == 'auct_hold')
                   & (idx.get_level_values('inst_cat') == 'consign'))

    # mark every selected allowance as available
    consigned = multiindex_change(all_accts.loc[consign_sel], {'status': 'available'})

    # previously unsold consigned allowances (unsold_di set) get redes metadata
    went_unsold = consigned.index.get_level_values('unsold_di') != prmt.NaT_proxy
    redes = multiindex_change(consigned.loc[went_unsold],
                              {'newness': 'redes',
                               'date_level': cq.date})

    # rebuild all_accts: redesignated + newly available + untouched remainder
    all_accts = pd.concat([redes,
                           consigned.loc[~went_unsold],
                           all_accts.loc[~consign_sel]
                           ], sort=False)

    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return all_accts
def redesignate_unsold_current_auct(all_accts, juris):
    """
    Redesignates state-owned allowances that are eligible, changing 'status' to 'available'.
    
    (Consignment are redesignated by fn make)
    
    Note that this function only redesignates unsold from current auctions to later current auctions. 
    
    This function doesn't redesignate unsold advance to later advance auctions.
    
    For applicable regs, see docstrings in functions called below: 
    redes_consign & reintro_update_unsold_all_juris

    Returns the updated all_accts df (unchanged when reintro_eligibility is False).
    """
    
    # NOTE(review): for a juris other than 'CA'/'QC', these locals stay unbound
    # and the logging f-strings below would raise NameError — only called with
    # 'CA'/'QC' as far as this file shows; confirm
    if juris == 'CA':
        cur_sell_out_counter = scenario_CA.cur_sell_out_counter
        reintro_eligibility = scenario_CA.reintro_eligibility
    elif juris == 'QC':
        cur_sell_out_counter = scenario_QC.cur_sell_out_counter
        reintro_eligibility = scenario_QC.reintro_eligibility
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    logging.info(f"in {cq.date} juris {juris}, cur_sell_out_counter: {cur_sell_out_counter}")
    logging.info(f"in {cq.date} juris {juris}, reintro_eligibility: {reintro_eligibility}")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    # only do things in this function if reintro_eligibility == True
    if reintro_eligibility == True:

        # ***** redesignation of advance as advance is done by fn redesignate_unsold_advance_as_advance *****
        
        # ***** redesignation of consignment is done by fn consign_make_available_incl_redes *****

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # redesignate unsold current state-owned (aka reintro)

        if prmt.run_tests == True:
            # TEST: in all_accts, are available allowances only in auct_hold? & have only 'date_level'==cq.date? 
            # (if so, then selection below for auct_type current & status available will work properly)
            available_sel = all_accts.loc[all_accts.index.get_level_values('status')=='available']
            
            if available_sel.empty == False:
                # TEST: are avail only in auct_hold?
                if available_sel.index.get_level_values('acct_name').unique().tolist() != ['auct_hold']:
                    print(f"{prmt.test_failed_msg} Available allowances in account other than auct_hold. Here's available:")
                    print(available_sel)
                else:
                    pass
                
                # TEST: do avail have only date_level == cq.date?
                if available_sel.index.get_level_values('date_level').unique().tolist() != [cq.date]:
                    print(f"{prmt.test_failed_msg} Available allowances have date_level other than cq.date (%s). Here's available:" % cq.date)
                    print(available_sel)
                else:
                    pass
                
            else: # available_sel.empty == True
                print("Warning" + f"! In {cq.date}, {inspect.currentframe().f_code.co_name}, available_sel is empty.")
                print("Because available_sel is empty, show auct_hold:")
                print(all_accts.loc[all_accts.index.get_level_values('acct_name')=='auct_hold'])
            # END OF TEST

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # calculate max reintro
        max_cur_reintro_1j_1q = calculate_max_cur_reintro(all_accts, juris)
        
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # if conditions are right, reintro any state-owned allowances
        # including filter for only positive rows
        mask1 = all_accts.index.get_level_values('status')=='unsold'
        mask2 = all_accts.index.get_level_values('auct_type')=='current'
        mask3 = all_accts['quant']>0
        unsold_cur_sum = all_accts.loc[(mask1) & (mask2) & (mask3)]['quant'].sum()
        
        if unsold_cur_sum > 0: 
            all_accts = reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q)
        
        else: # unsold sum was not > 0, so no unsold to redesignate
            pass
    else: # reintro_eligibility == False; nothing to do
        pass
    
    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")
    
    return(all_accts)
def calculate_max_cur_reintro(all_accts, juris):
    """
    Calculate the maximum quantity of state-owned allowances that went unsold at current auction that can be reintroduced.

    Based on regulations:
    CA: [CA regs Oct 2017], § 95911(f)(3)(C)
    QC: [QC regs Sep 2017], Section 54
    ON: [ON regs Jan 2018], Section 58(4)3

    Returns 25% of the total quantity (state & consigned) already available for
    the jurisdiction's current auction, before any reintroduction.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # total allowances currently available at this juris's current auction,
    # prior to redesignation of unsold state-owned allowances (aka reintro)
    idx = all_accts.index
    currently_available = (
        (idx.get_level_values('auct_type') == 'current')
        & (idx.get_level_values('status') == 'available')
        & (idx.get_level_values('juris') == juris)
    )
    avail_total = all_accts.loc[currently_available, 'quant'].sum()

    # regulations cap reintroduction at 25% of the quantity already available
    max_cur_reintro_1j_1q = avail_total * 0.25

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return max_cur_reintro_1j_1q
def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q):
    """
    Takes unsold allowances, reintro some based on rules.
    
    This function is called only when reintro_eligibility == True.
    
    Rules on redesignation of unsold state-owned allowances (aka reintroduction):
    CA: [CA regs Oct 2017], § 95911(f)(3)(B) & (C)
    QC: [QC regs Sep 2017], Section 54 (paragraph 1)
    ON: [ON regs Jan 2018], Section 58(4)1

    Iterates over eligible unsold rows (earliest vintages first, via sort_index)
    and moves quantities into 'available' status up to max_cur_reintro_1j_1q.
    Returns the updated all_accts df.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    # eligible rows: positive-quantity unsold state-owned allowances from
    # current auctions, still held in auct_hold, belonging to this juris
    mask1 = all_accts.index.get_level_values('status')=='unsold'
    mask2 = all_accts.index.get_level_values('acct_name')=='auct_hold'
    mask3 = all_accts.index.get_level_values('auct_type')=='current'
    mask4 = all_accts.index.get_level_values('inst_cat')==juris
    mask5 = all_accts['quant']>0
    
    mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)
    
    # QC anomalies: add additional masks:
    if cq.date == quarter_period('2015Q2'):
        # only reintro vintage 2013
        mask6 = all_accts.index.get_level_values('vintage')==2013
        mask = mask & (mask6)
        
        reintro_eligible_1j = all_accts.loc[mask]
        
    elif cq.date == quarter_period('2015Q3') or cq.date == quarter_period('2015Q4'):
        # block reintro
        reintro_eligible_1j = prmt.standard_MI_empty.copy()
        
    else:
        reintro_eligible_1j = all_accts.loc[mask]
    
    remainder = all_accts.loc[~mask]
    
    if reintro_eligible_1j['quant'].sum() > 0:

        # accumulator: amount reintro in present auction
        reintro_1q_quant = 0 # initialize

        # un-accumulator: amount remaining to be introduced
        max_cur_reintro_1j_1q_remaining = max_cur_reintro_1j_1q
        
        # initialize df to collect all reintro
        reintro_1j_1q = prmt.standard_MI_empty.copy()
        
        # sort_index to ensure that earliest vintages are drawn from first
        reintro_eligible_1j = reintro_eligible_1j.sort_index()
        
        for row in reintro_eligible_1j.index:
            if max_cur_reintro_1j_1q_remaining == 0:
                break
            
            else:
                # batch = as much of this row as the remaining cap allows
                reintro_one_batch_quantity = min(max_cur_reintro_1j_1q_remaining,
                                                 reintro_eligible_1j.at[row, 'quant'])

                # update accumulator for amount reintro so far in present quarter (may be more than one batch)
                reintro_1q_quant += reintro_one_batch_quantity
                
                # update un-accumulator for max_cur_reintro_1j_1q_remaining (may be more than one batch)
                max_cur_reintro_1j_1q_remaining += -1*reintro_one_batch_quantity

                # copy reintro_eligible_1j before update; use to create reintro_1j_1q 
                # create copy of reintro_eligible_1j & clear quantities (set equal to zero float)
                # NOTE(review): copying the full eligible df per row is O(n^2);
                # harmless at these sizes, but a single-row frame would suffice
                reintro_one_batch = reintro_eligible_1j.copy()
                reintro_one_batch.index.name = 'reintro_one_batch'
                reintro_one_batch.name = 'reintro_one_batch'
                reintro_one_batch['quant'] = float(0)
                
                # set new value for 'quant'; delete rows with zero quantity
                reintro_one_batch.at[row, 'quant'] = reintro_one_batch_quantity
                
                # put reintro for this row into df for collecting all reintro for this juris
                reintro_1j_1q = pd.concat([reintro_1j_1q, reintro_one_batch])
                
                # update reintro_eligible_1j to remove reintro_one_batch_quantity
                reintro_eligible_1j.at[row, 'quant'] = reintro_eligible_1j.at[row, 'quant'] - reintro_one_batch_quantity 

        # filter out rows with fractional allowances, zero, NaN
        # NOTE(review): the second dropna() below is redundant (the first line
        # already ends with .dropna())
        reintro_1j_1q = reintro_1j_1q.loc[(reintro_1j_1q['quant']>1e-7) | (reintro_1j_1q['quant']<-1e-7)].dropna()
        reintro_1j_1q = reintro_1j_1q.dropna()
        
        # log the quantity reintroduced
        logging.info(f"in {cq.date} for juris {juris}, quantity reintro: {reintro_1j_1q['quant'].sum()}")
        
        # don't need to update acct_name; should still be auct_hold
        mapping_dict = {'newness': 'reintro', 
                        'status': 'available', 
                        'date_level': cq.date}
        reintro_1j_1q = multiindex_change(reintro_1j_1q, mapping_dict)

        # filter out zero rows
        reintro_eligible_1j = reintro_eligible_1j.loc[reintro_eligible_1j['quant']>0]
        reintro_1j_1q = reintro_1j_1q.loc[reintro_1j_1q['quant']>0]
        
        # concat to recreate all_accts
        # (alternative: create df of reintro to remove, then just concat all_accts_pos, to add, to remove)
        all_accts = pd.concat([reintro_1j_1q, reintro_eligible_1j, remainder], sort=True)
        all_accts = all_accts.groupby(level=prmt.standard_MI_names).sum()
        
    else: # if reintro_eligible_1j['quant'].sum() is not > 0
        pass
    
    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")
    
    return(all_accts)
def process_auction_cur_CA_all_accts(all_accts):
    """
    Processes current auction for CA, applying the specified order of sales (when auctions don't sell out).
    
    Order of sales based on regs:
    CA: [CA regs Oct 2017], § 95911(f)(1)

    CA: for consignment, how to split sales between entities:
    [CA regs Oct 2017], § 95911(f)(2) 
    
    Sales waterfall, in priority order: consignment first, then reintroduced state-owned
    allowances, then state-owned allowances newly available as current. The quantity sold
    overall is capped by cur_remaining_to_sell_1q_CA, an "un-accumulator" that is drawn
    down as each batch sells.
    
    Notes: Once it is confirmed to be working properly, this could be simplified by:
    1. not re-doing filtering from scratch each batch of allowances
    2. finishing new fn avail_to_sold_all_accts, to avoid repetitive code
    
    Relies on module-level state: prmt (model parameters), cq (current quarter), and
    helpers multiindex_change, unsold_update_status, test_* (defined elsewhere in this file).
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    if prmt.run_tests == True:
        # TEST: check that all available allowances are in auct_hold
        avail_for_test = all_accts.loc[all_accts.index.get_level_values('status')=='available']
        avail_for_test_accts = avail_for_test.index.get_level_values('acct_name').unique().tolist()
        if avail_for_test.empty == False:
            if avail_for_test_accts != ['auct_hold']:
                print(f"{prmt.test_failed_msg} Some available allowances were in an account other than auct_hold. Here's available:")
                print(avail_for_test)
            else: # avail_for_test_accts == ['auct_hold']
                pass
        else: # avail_for_test.empty == True
            print("Warning" + "! avail_for_test is empty.")
        # END OF TEST
    
    # get sales % for current auctions, for this juris, for cq.date
    # (works for auctions whether linked or unlinked, i.e., CA-only and CA-QC)
    df = prmt.auction_sales_pcts_all.copy()
    df = df.loc[(df.index.get_level_values('market').str.contains('CA')) &
                (df.index.get_level_values('auct_type')=='current')]
    df.index = df.index.droplevel(['market', 'auct_type'])
    sales_fract_cur_1j_1q = df.at[cq.date]
    
    # get current available allowances
    # (it should be that all available allowances are in auct_hold)
    mask1 = all_accts.index.get_level_values('auct_type')=='current'
    mask2 = all_accts.index.get_level_values('status')=='available'
    mask3 = all_accts.index.get_level_values('date_level')==cq.date
    mask4 = all_accts.index.get_level_values('juris')=='CA'
    mask5 = all_accts['quant'] > 0
    mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)
    cur_avail_CA_1q = all_accts.loc[mask]
    
    not_cur_avail_CA_1q = all_accts.loc[~mask]
    
    if sales_fract_cur_1j_1q == 1.0:
        # sell-out case: all available allowances are sold and transferred into gen_acct
        cur_sold_CA_1q = cur_avail_CA_1q
        mapping_dict = {'status': 'sold', 'acct_name': 'gen_acct'}
        cur_sold_CA_1q = multiindex_change(cur_sold_CA_1q, mapping_dict)
        
        # recombine
        all_accts = pd.concat([cur_sold_CA_1q, not_cur_avail_CA_1q], sort=False)
        
    else: # sales_fract_cur_1j_1q != 1.0:
        # calculate quantity of CA allowances sold (and test that variable is a float)
        cur_sold_1q_tot_CA = cur_avail_CA_1q['quant'].sum() * sales_fract_cur_1j_1q
        
        if prmt.run_tests == True:
            test_if_value_is_float_or_np_float64(cur_sold_1q_tot_CA)

        # remaining: un-accumulator for all CA sales; initialize here; will be updated repeatedly below
        # (.copy() works here because Series.sum() returns a np.float64, which exposes .copy())
        cur_remaining_to_sell_1q_CA = cur_sold_1q_tot_CA.copy()

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # sales priority: consignment are first
        # use if statement (or alternative) for all types of consignment, 
        # because it is possible (although unlikely) that, in a particular quarter, entities may opt to consign 0
        # even if they must consign more than 0 for the whole year
        # earlier in model, amounts per quarter are calculated as one-quarter of annual total, but that may change in future

        # select consignment allowances (from allowances available in cq.date current auction)
        mask1 = all_accts.index.get_level_values('auct_type')=='current'
        mask2 = all_accts.index.get_level_values('status')=='available'
        mask3 = all_accts.index.get_level_values('date_level')==cq.date
        mask4 = all_accts.index.get_level_values('juris')=='CA'
        mask5 = all_accts.index.get_level_values('inst_cat')=='consign'
        mask6 = all_accts['quant'] > 0
        mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)
        consign_avail_1q = all_accts.loc[mask]
        not_consign_avail_1q = all_accts.loc[~mask]
        
        # iterate through all rows for available allowances; remove those sold
        # (code adapted from avail_to_sold)

        # start by creating df from avail, with values zeroed out
        consign_sold_1q = consign_avail_1q.copy()
        consign_sold_1q['quant'] = float(0)

        # in regulations, for consignment, no sales priority of redesignated vs. newly available
        # however, for simplicity and to match state-owned behavior, sort df so that redes will sell before new
        # first sort by vintage, ascending=True (redes will be same vintage or earlier than newly available)
        # then sort by newness, ascending=False (so that 'redes' will occur before 'new')
        consign_avail_1q = consign_avail_1q.sort_index(level=['vintage', 'newness'], ascending=[True, False])

        for row in consign_avail_1q.index:
            in_stock_row = consign_avail_1q.at[row, 'quant']

            sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)

            if sold_from_row_quantity > 1e-7:
                # update un-accumulator for jurisdiction
                cur_remaining_to_sell_1q_CA = cur_remaining_to_sell_1q_CA - sold_from_row_quantity

                # update sold quantity
                consign_sold_1q.at[row, 'quant'] = sold_from_row_quantity

                # update avail quantity
                consign_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity

            else: # sold_from_row_quantity <= 1e-7:
                pass

        # update metadata for sold
        # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'
        mapping_dict = {'status': 'sold', 
                        'acct_name': 'gen_acct'}
        consign_sold_1q = multiindex_change(consign_sold_1q, mapping_dict)

        # for unsold, metadata is updated for all allowance types at once, at end of this function
        # unsold is what's left in avail df
        consign_unsold_1q = consign_avail_1q

        # recombine to create new version of all_accts
        all_accts = pd.concat([consign_sold_1q,
                               consign_unsold_1q,
                               not_consign_avail_1q], 
                              sort=False)
        # clean-up: drop rows with quantity within 1e-7 of zero
        all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]

        if prmt.run_tests == True:
            # TEST: conservation of allowances
            all_accts_after_consign_sales = all_accts['quant'].sum()
            diff = all_accts_after_consign_sales - all_accts_sum_init
            if abs(diff) > 1e-7:
                print(f"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after consignment sales.")
                print("diff = all_accts_after_consign_sales - all_accts_sum_init:")
                print(diff)
                print("all_accts_sum_init: %s" % all_accts_sum_init)
                print("consign_sold_1q sum: %s" % consign_sold_1q['quant'].sum())
                print("consign_unsold_1q sum: %s" % consign_unsold_1q['quant'].sum())
                print("not_consign_avail_1q sum: %s" % not_consign_avail_1q['quant'].sum())
            # END OF TEST

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # sales priority: after consignment, reintro are next

        # extract reintro allowances from all_accts
        mask1 = all_accts.index.get_level_values('auct_type')=='current'
        mask2 = all_accts.index.get_level_values('status')=='available'
        mask3 = all_accts.index.get_level_values('date_level')==cq.date
        mask4 = all_accts.index.get_level_values('juris')=='CA'
        mask5 = all_accts.index.get_level_values('newness')=='reintro'
        mask6 = all_accts['quant'] > 0
        mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)
        reintro_avail_1q = all_accts.loc[mask]
        not_reintro_avail_1q = all_accts.loc[~mask]

        # iterate through all rows for available allowances; remove those sold
        # (code adapted from avail_to_sold)
        
        # start by creating df from avail, with values zeroed out
        # sort_index to ensure that earliest vintages are drawn from first
        reintro_avail_1q = reintro_avail_1q.sort_index()
        
        reintro_sold_1q = reintro_avail_1q.copy()
        reintro_sold_1q['quant'] = float(0)

        for row in reintro_avail_1q.index:
            in_stock_row = reintro_avail_1q.at[row, 'quant']

            sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)

            if sold_from_row_quantity > 1e-7:
                # update un-accumulator for jurisdiction
                cur_remaining_to_sell_1q_CA = cur_remaining_to_sell_1q_CA - sold_from_row_quantity

                # update sold quantity & metadata
                reintro_sold_1q.at[row, 'quant'] = sold_from_row_quantity

                # update reintro_avail_1q quantity (but not metadata)
                reintro_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity

            else: # sold_from_row_quantity <= 1e-7:
                pass


        # using all_accts:
        # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'
        mapping_dict = {'status': 'sold', 
                        'acct_name': 'gen_acct'}
        reintro_sold_1q = multiindex_change(reintro_sold_1q, mapping_dict)

        # for unsold, metadata is updated for all allowance types at once, at end of this function
        # unsold is what's left in avail df
        reintro_unsold_1q = reintro_avail_1q

        # recombine
        all_accts = pd.concat([reintro_sold_1q,
                               reintro_unsold_1q,
                               not_reintro_avail_1q], 
                              sort=False)
        # clean-up: drop rows with quantity within 1e-7 of zero
        all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]

        if prmt.run_tests == True:
            # TEST: conservation of allowances
            all_accts_after_reintro_sales = all_accts['quant'].sum()
            diff = all_accts_after_reintro_sales - all_accts_sum_init
            if abs(diff) > 1e-7:
                print(f"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after reintro sales.")
            # END OF TEST

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # sales priority: state-owned allowances available for first time as current (including fka adv, if there are any)

        # extract state allowances new to current auctions (from all_accts)
        mask1 = all_accts.index.get_level_values('auct_type')=='current'
        mask2 = all_accts.index.get_level_values('status')=='available'
        mask3 = all_accts.index.get_level_values('date_level')==cq.date
        mask4 = all_accts.index.get_level_values('juris')=='CA'
        mask5 = all_accts.index.get_level_values('newness')=='new'
        mask6 = all_accts['quant'] > 0
        mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)
        new_avail_1q = all_accts.loc[mask]
        not_new_avail_1q = all_accts.loc[~mask]

        # iterate through all rows for available allowances; remove those sold
        # (code adapted from avail_to_sold)
        
        # start by creating df from avail, with values zeroed out
        # sort_index to ensure that earliest vintages are drawn from first
        new_avail_1q = new_avail_1q.sort_index()
        
        new_sold_1q = new_avail_1q.copy()
        new_sold_1q['quant'] = float(0)

        for row in new_avail_1q.index:
            in_stock_row = new_avail_1q.at[row, 'quant']

            sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)

            if sold_from_row_quantity > 1e-7:
                # update un-accumulator for jurisdiction
                cur_remaining_to_sell_1q_CA = cur_remaining_to_sell_1q_CA - sold_from_row_quantity

                # update sold quantity & metadata
                new_sold_1q.at[row, 'quant'] = sold_from_row_quantity

                # update new_avail_1q quantity (but not metadata)
                new_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity

            else: # sold_from_row_quantity <= 1e-7:
                pass

        # using all_accts:
        # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'
        mapping_dict = {'status': 'sold', 
                        'acct_name': 'gen_acct'}
        new_sold_1q = multiindex_change(new_sold_1q, mapping_dict)

        # for unsold, metadata is updated for all allowance types at once, at end of this function
        # unsold is what's left in avail df
        new_unsold_1q = new_avail_1q

        # recombine & groupby sum
        all_accts = pd.concat([new_sold_1q,
                               new_unsold_1q,
                               not_new_avail_1q], 
                              sort=True).sort_index() 
        # all_accts = all_accts.groupby(prmt.standard_MI_names).sum()

        # clean-up: drop rows with quantity within 1e-7 of zero
        all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]
        
        if prmt.run_tests == True:
            # TEST: conservation of allowances
            all_accts_after_new_sales = all_accts['quant'].sum()
            diff = all_accts_after_new_sales - all_accts_sum_init
            if abs(diff) > 1e-7:
                print(f"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after new sales.")
            # END OF TEST

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # update status for all unsold
        all_accts = unsold_update_status(all_accts)

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # filter out rows with fractional allowances or zero
        all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)].dropna()

    # end of if-else statement that began with "if sales_fract_cur_1j_1q == 1.0"

    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)
        
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return(all_accts)
# end of process_auction_cur_CA_all_accts
def update_reintro_eligibility(juris):
    """
    Recalculates reintro_eligibility (an attribute of the scenario object) from the cq.date auction result.

    Selects auction sales percentages for the specified jurisdiction (to handle separate markets).

    For citation of regs about reintro for each jurisdiction, see docstring for reintro_update_unsold_1j.
    """

    # pick the scenario object whose state this function reads and, at the end, writes back
    if juris == 'CA':
        scenario = scenario_CA
    elif juris == 'QC':
        scenario = scenario_QC

    # work on local copies of the two state variables
    cur_sell_out_counter = scenario.cur_sell_out_counter
    reintro_eligibility = scenario.reintro_eligibility

    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    logging.info(f"in {cq.date} for {juris}, cur_sell_out_counter is {cur_sell_out_counter} before update")
    logging.info(f"in {cq.date} for {juris}, reintro_eligibility is {reintro_eligibility} before update")

    # sales % for current auctions, for this juris, at cq.date
    # (works for auctions both linked and unlinked, i.e., inputting juris=='CA' works for CA-only and CA-QC auctions)
    pcts = prmt.auction_sales_pcts_all.copy()
    row_sel = (pcts.index.get_level_values('market').str.contains(juris) &
               (pcts.index.get_level_values('auct_type') == 'current'))
    pcts = pcts.loc[row_sel]
    pcts.index = pcts.index.droplevel(['market', 'auct_type'])
    sales_pct_cur_1j_1q = pcts.at[cq.date]

    # True when the auction sold out completely (100% of allowances sold)
    cur_sell_out = sales_pct_cur_1j_1q == float(1)

    if cur_sell_out == True:
        cur_sell_out_counter = cur_sell_out_counter + 1
    elif cur_sell_out == False:
        # any auction that didn't sell out resets the streak
        cur_sell_out_counter = 0
    else:
        print("Error!: cur_sell_out is neither True nor False.")

    # two consecutive sell-outs are required before reintro is allowed
    if cur_sell_out_counter >= 2:
        reintro_eligibility = True
    elif cur_sell_out_counter < 2:
        reintro_eligibility = False
    else:
        print("Error!: cur_sell_out_counter has a problem")

    logging.info(f"in {cq.date} for {juris}, cur_sell_out_counter is {cur_sell_out_counter} after update")
    logging.info(f"in {cq.date} for {juris}, reintro_eligibility is {reintro_eligibility} after update")
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    # write updated state back onto the scenario object (scenario_CA or scenario_QC)
    scenario.cur_sell_out_counter = cur_sell_out_counter
    scenario.reintro_eligibility = reintro_eligibility

    # using object attributes, no return from this func
def transfer_consign__from_limited_use_to_auct_hold(all_accts):
    """
    Specified quantities remaining in limited_use account of a particular vintage will be moved to auct_hold.

    Allowances consigned must be transferred into auct_hold 75 days before the auction in which they will be available.
    CA regs: § 95910(d)(4).
    
    Runs at the end of each quarter, after auction processed for that quarter.
    
    So, i.e., for Q3 auction ~Aug 15, transfer would occur ~June 1 (in Q2), after Q2 auction (~May 15).
    
    These allowances will become available in the following auction (one quarter after cq.date).
    
    Since this is for consignment, which are only in CA, it doesn't apply to QC or ON.
    
    Relies on module-level state: prmt (parameters incl. consign_hist_proj) and cq (current quarter).
    Returns the updated all_accts DataFrame.
    """ 
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    # create next_q after cq.date (formatted as quarter)
    next_q = (pd.to_datetime(f'{cq.date.year}Q{cq.date.quarter}') + DateOffset(months=3)).to_period('Q')
    
    # get quantity consigned in next_q (consign_next_q_quant)
    # vintage of these allowances will always be next_q.year 
    # (true even for anomalous CA auction in 2012Q4; other jurisdictions don't have consignment)
    # look up quantity consigned in next_q from historical record
    # NOTE(review): the .at lookup requires consign_hist_proj's index to be exactly this 11-level
    # tuple layout; a schema change there breaks this silently with a KeyError
    consign_next_q_quant = prmt.consign_hist_proj.at[
        ('auct_hold', 'CA', 'current', 'consign', next_q.year, 'new', 'not_avail', next_q, 
         prmt.NaT_proxy, prmt.NaT_proxy, 'MMTCO2e'), 
        'quant']

    if prmt.run_tests == True:
        test_if_value_is_float_or_np_float64(consign_next_q_quant)
    
    # get allowances in limited_use, inst_cat=='consign', for specified vintage
    # and calculate sum of that set
    mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'
    mask2 = all_accts.index.get_level_values('inst_cat')=='consign'
    mask3 = all_accts.index.get_level_values('vintage')==next_q.year
    mask4 = all_accts['quant'] > 0
    mask = (mask1) & (mask2) & (mask3) & (mask4)
    consign_not_avail = all_accts.loc[mask]
    quant_not_avail = consign_not_avail['quant'].sum()
    
    if prmt.run_tests == True:
        # TEST: check that consign_not_avail is only 1 row
        # (the single-row invariant is assumed by the index_first_row logic below)
        if len(consign_not_avail) != 1:
            print(f"{prmt.test_failed_msg} Expected consign_not_avail to have 1 row. Here's consign_not_avail:")
            print(consign_not_avail)
            print("Here's all_accts.loc[mask1] (limited_use):")
            print(all_accts.loc[mask1])
            print("Here's all_accts:")
            print(all_accts)
        # END OF TEST
    
    # split consign_not_avail to put only the specified quantity into auct_hold; 
    # rest stays in limited_use
    consign_avail = consign_not_avail.copy()
    
    # consign_avail and consign_not_avail have same index (before consign avail index updated below)
    # use this common index for setting new values for quantity in each df
    # (only works because each df is a single row, as tested for above)
    index_first_row = consign_avail.index[0]
    
    # set quantity in consign_avail, using consign_next_q_quant
    consign_avail.at[index_first_row, 'quant'] = consign_next_q_quant
    
    # update metadata: put into auct_hold
    # this fn does not make these allowances available; this will occur in separate fn, at start of cq.date
    mapping_dict = {'acct_name': 'auct_hold', 
                    'newness': 'new', 
                    'status': 'not_avail', 
                    'date_level': next_q}
    consign_avail = multiindex_change(consign_avail, mapping_dict)

    # update quantity in consign_not_avail, to remove those consigned for next_q
    consign_not_avail.at[index_first_row, 'quant'] = quant_not_avail - consign_next_q_quant
    
    # get remainder of all_accts
    all_accts_remainder = all_accts.loc[~mask]
    
    # recombine to get all_accts again
    all_accts = pd.concat([consign_avail, 
                           consign_not_avail, 
                           all_accts_remainder], 
                          sort=False)
    
    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")
    
    return(all_accts)
def transfer_CA_alloc__from_ann_alloc_hold_to_general(all_accts):
    """
    Moves CA allocations out of the annual allocation holding account and into the general account.

    These transfers occur on Jan 1 of each year, per 95831(a)(6)(D) to (I).

    Note that Compliance Instrument Reports for Q4 of a given year are generated in early Jan
    of the following year, so Q4 CIRs capture these Jan 1 transfers of CA allocations out of
    government accounts and into private accounts.
    """

    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # record the total quantity for the conservation check below
    all_accts_sum_init = all_accts['quant'].sum()

    # select every row held in 'ann_alloc_hold' with juris 'CA'
    idx = all_accts.index
    sel = ((idx.get_level_values('acct_name') == 'ann_alloc_hold') &
           (idx.get_level_values('juris') == 'CA'))
    moved = all_accts.loc[sel]
    kept = all_accts.loc[~sel]

    # relabel the selected rows as residing in the general account
    moved = multiindex_change(moved, {'acct_name': 'gen_acct'})

    # reassemble the full accounts DataFrame
    all_accts = pd.concat([moved, kept])

    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return(all_accts)
def transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, juris):
    """
    Transfer the known historical quantities of allowances available in current auctions.
    
    Processes all allowances for a given year (with date_level year == cq.date year).
    
    Occurs at the start of each year (Jan 1).
    
    Transfers specified allowances from alloc_hold to auct_hold.
    
    Operates only for newly available allowances, as specified manually in input file sheet 'qauct 2012-2017'.
    
    Operates only for state-owned allowances.
    
    Note: There is a separate fn for making the allowances in auct_hold available.
    
    Relies on module-level state: prmt, cq, and helpers quarter_period & multiindex_change.
    Returns the updated all_accts DataFrame.
    """
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")
    
    # pre-test for conservation of allowances
    all_accts_sum_init = all_accts['quant'].sum()
    
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
    # get the historical data for cq.date.year
    df = prmt.qauct_new_avail.copy()
    mask1 = df.index.get_level_values('inst_cat')==juris
    mask2 = df.index.get_level_values('auct_type')=='current'
    mask3 = df.index.get_level_values('date_level').year==cq.date.year
    mask4 = df.index.get_level_values('vintage')==cq.date.year
    avail_1v = df.loc[(mask1) & (mask2) & (mask3) & (mask4)]
    
    # if partial year of historical data, fill in remaining auctions
    # in avail_1v, get the max date_level, and then the quarter of that max date
    max_quarter = avail_1v.index.get_level_values('date_level').max().quarter

    # average quantity newly available per auction, computed ONCE from the historical data only
    # (BUG FIX: previously this was recomputed inside the loop below from avail_1v, which by then
    # already included earlier projected quarters, so each projection compounded on the last;
    # the stated intent is that future auctions equal the year-to-date historical average)
    avail_1v_avg = avail_1v['quant'].sum() / max_quarter

    for quarter in range(max_quarter+1, 4+1): 
        # only enters loop if max_quarter < 4, which means partial year of data; 
        # make projection for remaining quarters,
        # assume that future auctions will be same on average as auctions in year-to-date
        
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # fill in remaining auctions
        
        # create new df and template row for appending to it
        avail_1v_plus = avail_1v.copy()
        template_row = avail_1v.loc[avail_1v_plus.index[-1:]]
        
        # determine new quarterly date for auction, set in level 'date_level'
        future_date = quarter_period(f"{cq.date.year}Q{quarter}")
        mapping_dict = {'date_level': future_date}
        template_row = multiindex_change(template_row, mapping_dict)

        # set new quantity
        # (use .loc with the row's full index; .at with an Index object as row key is fragile)
        template_row.loc[template_row.index, 'quant'] = avail_1v_avg

        # append to historical plus
        # (pd.concat instead of DataFrame.append, which was removed in pandas 2.0)
        avail_1v_plus = pd.concat([avail_1v_plus, template_row])
        
        # set the extended historical record to have same name as original df
        avail_1v = avail_1v_plus

    # end of "for quarter in range(max_quarter+1, 4+1):"
    
    # Now there is a full year of data (either historical, or historical + projection for remaining quarters)
    
    # calculate total to be available, of current vintage (vintage == cq.date.year)
    avail_1v_tot = avail_1v['quant'].sum()
    
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # already in auct_hold (state-owned), current vintage
    # (unsold advance also added to auct_hold)
    # may be multiple rows
    mask1 = all_accts.index.get_level_values('inst_cat')==juris
    mask2 = all_accts.index.get_level_values('auct_type')=='current' # probably redundant
    mask3 = all_accts.index.get_level_values('vintage')==cq.date.year
    mask4 = all_accts.index.get_level_values('acct_name')=='auct_hold'
    mask5 = all_accts['quant'] > 0
    mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)

    # calculate quantity of current vintage already in auct_hold
    auct_hold_1v_tot = all_accts.loc[mask]['quant'].sum()

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # calculate quantity to transfer: only those needed to get avail_1v_tot into auct_hold
    to_transfer_tot = avail_1v_tot - auct_hold_1v_tot

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # from alloc_hold, get cap allowances of specified juris & vintage, to be drawn from
    mask1 = all_accts.index.get_level_values('acct_name')=='alloc_hold'
    mask2 = all_accts.index.get_level_values('juris')==juris
    mask3 = all_accts.index.get_level_values('inst_cat')=='cap'
    mask4 = all_accts.index.get_level_values('vintage')==cq.date.year
    mask5 = all_accts['quant'] > 0
    mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)

    # split all_accts into two parts (for alloc_hold & not alloc_hold)
    alloc_hold_1v = all_accts.loc[mask]
    not_alloc_hold = all_accts.loc[~mask]

    alloc_hold_1v_tot = alloc_hold_1v['quant'].sum()

    diff = alloc_hold_1v_tot - to_transfer_tot

    if diff < 1e-7:
        # there are just enough allowances in alloc_hold *or* there is a shortfall in alloc_hold

        # then transfer everything in alloc_hold to auct_hold
        # (will wind up with negative values in auct_hold after cur_upsample_avail_state_owned_historical)
        from_alloc_hold = alloc_hold_1v.copy()
        mapping_dict = {'acct_name': 'auct_hold', 
                        'auct_type': 'current', 
                        'inst_cat': juris, 
                        'newness': 'new', 
                        'status': 'not_avail'}
        from_alloc_hold = multiindex_change(from_alloc_hold, mapping_dict)

        # recreate all_accts
        # note: removed deficit that had been calculated before
        all_accts = pd.concat([not_alloc_hold, from_alloc_hold], sort=True)

        # groupby sum, splitting all_accts into pos & neg
        all_accts_pos = all_accts.loc[all_accts['quant']>0]
        all_accts_neg = all_accts.loc[all_accts['quant']<0]
        all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()
        all_accts = pd.concat([all_accts_pos, all_accts_neg])

    elif diff > 1e-7:
        # excess allowances in alloc_hold, so leave remainder in alloc_hold
        # (reminder: diff = alloc_hold_1v_tot - to_transfer_tot)

        # split alloc_hold allowances into part needed and part left behind
        from_alloc_hold = alloc_hold_1v.copy()
        from_alloc_hold.loc[from_alloc_hold.index, 'quant'] = to_transfer_tot

        mapping_dict = {'acct_name': 'auct_hold', 
                        'auct_type': 'current', 
                        'inst_cat': juris, 
                        'newness': 'new', 
                        'status': 'not_avail'}
        from_alloc_hold = multiindex_change(from_alloc_hold, mapping_dict)
        
        remainder_alloc_hold = alloc_hold_1v.copy()
        # reminder: diff = alloc_hold_1v_tot - to_transfer_tot
        remainder_alloc_hold.loc[remainder_alloc_hold.index, 'quant'] = diff
        # no change to metadata needed

        # concat 
        all_accts = pd.concat([not_alloc_hold, from_alloc_hold, remainder_alloc_hold], sort=True)

        # groupby sum, splitting all_accts into pos & neg
        all_accts_pos = all_accts.loc[all_accts['quant']>0].groupby(level=prmt.standard_MI_names).sum()
        all_accts_neg = all_accts.loc[all_accts['quant']<0].groupby(level=prmt.standard_MI_names).sum()
        all_accts = pd.concat([all_accts_pos, all_accts_neg])

    else:
        # reachable only if diff is exactly 1e-7 or NaN
        print(f"Shouldn't reach this point; diff is: {diff}")
    
    if prmt.run_tests == True:
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_conservation_against_full_budget(all_accts, juris, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)
    
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")
    
    return(all_accts)
into pos & neg\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " else:\n", + " print(f\"Shouldn't reach this point; diff is: {diff}\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cur_upsample_avail_state_owned_historical(all_accts, juris):\n", + " \"\"\"\n", + " Takes allowances in auct_hold and assigns specific quantities to each auction within a given year.\n", + " \n", + " This isn't really an upsample, but it accomplishes the same end, of assigning quarterly quantities.\n", + " \n", + " Does this based on historical data.\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " # ~~~~~~~~~~~~~~~\n", + " # get auct_hold, state-owned, specified juris, with auct_type=='current'\n", + " # also exclude any future vintage allowances (unsold advance retained in auct_hold)\n", + " # select only \"current\" vintage == cq.date.year\n", + " # exclude any negative values in auct_hold\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = 
all_accts.index.get_level_values('juris')==juris\n", + " mask3 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask4 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask5 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " annual_avail_1v = all_accts.loc[mask] \n", + " not_annual_avail_1v = all_accts.loc[~mask]\n", + " \n", + " # ~~~~~~~~~~~~~~~\n", + " # get historical data for how much newly available in each auction (quact_mod is only newly available)\n", + " # select only \"current\" vintage == cq.date.year\n", + " df = prmt.qauct_new_avail.copy()\n", + " hist_1v = df.loc[(df.index.get_level_values('inst_cat')==juris) &\n", + " (df.index.get_level_values('auct_type')=='current') &\n", + " (df.index.get_level_values('date_level').year==cq.date.year) & \n", + " (df.index.get_level_values('vintage')==cq.date.year)]\n", + " \n", + " max_date_cur = hist_1v.index.get_level_values('date_level').max()\n", + " latest_hist_year_cur = max_date_cur.year\n", + " latest_hist_quarter_cur = max_date_cur.quarter\n", + " \n", + " # ~~~~~~~~~~~~~~~~\n", + " # create hist_proj, in which projection will be added to historical\n", + " hist_proj = hist_1v.copy()\n", + " \n", + " if cq.date.year == latest_hist_year_cur and latest_hist_quarter_cur < 4:\n", + " # special case of partial year historical data\n", + " # need to fill in additional quarters, based on what's remaining in auct_hold\n", + " \n", + " # calculate number of remaining quarters\n", + " num_remaining_q = 4 - latest_hist_quarter_cur\n", + " \n", + " # remaining after historical quantities are made available prior to cq.date\n", + " remaining_tot = annual_avail_1v['quant'].sum() - hist_1v['quant'].sum()\n", + " \n", + " # average newly available per remaining quarter\n", + " avg_remaining = remaining_tot / num_remaining_q\n", + " \n", + " latest_hist_q = 
hist_1v.loc[hist_1v.index.get_level_values('date_level')==max_date_cur]\n", + " \n", + " # create proj_quarter, with new value (avg_remaining)\n", + " proj_qs = latest_hist_q.copy()\n", + " proj_qs.at[proj_qs.index, 'quant'] = avg_remaining # will fail if latest_hist_q is > 1 row\n", + " \n", + " # iterate through projection quarters (starting 1 quarter after latest_hist_quarter_cur)\n", + " for quarter in range(latest_hist_quarter_cur+1, 4+1): \n", + " date_1q = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " proj_1q = proj_qs.copy()\n", + " mapping_dict = {'date_level': date_1q}\n", + " proj_1q = multiindex_change(proj_1q, mapping_dict)\n", + " \n", + " hist_proj = pd.concat([hist_proj, proj_1q])\n", + " \n", + " all_accts = pd.concat([hist_proj, not_annual_avail_1v], sort=True)\n", + " \n", + " else: \n", + " # reached in 2 cases: \n", + " # case 1. cq.date.year < latest_hist_year_cur\n", + " # case 2. latest_hist_year_cur == cq.date.year and latest_hist_quarter_cur == 4:\n", + " # (this fn only runs if cq.date.year <= latest_hist_year_cur)\n", + " \n", + " # there is a full year of historical data for cq.date.year; \n", + " # simply use historical data\n", + " \n", + " # if hist_1v was more than was in alloc_hold, create deficit\n", + " hist_excess = hist_1v['quant'].sum() - annual_avail_1v['quant'].sum()\n", + " if hist_excess > 1e-7:\n", + " # create deficit in alloc_hold\n", + " # take row annual_avail_1v, update value\n", + " deficit = annual_avail_1v.copy()\n", + " deficit.at[deficit.index, 'quant'] = -1 * hist_excess\n", + " \n", + " # update metadata\n", + " mapping_dict = {'status': 'deficit', \n", + " 'date_level': cq.date}\n", + " deficit = multiindex_change(deficit, mapping_dict)\n", + " \n", + " all_accts = pd.concat([hist_1v, not_annual_avail_1v, deficit], sort=True)\n", + " \n", + " else:\n", + " all_accts = pd.concat([hist_1v, not_annual_avail_1v], sort=True)\n", + " \n", + "\n", + " # clean-up; exclude fractional, zero, NaN rows\n", + " 
all_accts = all_accts.loc[(all_accts['quant']>1e-10) | (all_accts['quant']<-1e-10)]\n", + " \n", + " # check for duplicate rows; if so, groupby sum for positive rows only\n", + " dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " if dups.empty==False:\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7]\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, juris):\n", + " \"\"\"\n", + " For projection, transfers all allowances remaining in alloc_hold to become state-owned current.\n", + " \n", + " Processes all allowances for a given year (with date_level year == cq.date year).\n", + " \n", + " Occurs at the start of each year (Jan 1).\n", + " \n", + " Transfers specified allowances from alloc_hold to auct_hold.\n", + " \n", + " There may already be unsold from advance auctions in auct_hold; if so, this fn does groupby sum.\n", + " \n", + " Note: There is a separate fn for making the allowances in auct_hold available.\n", + "\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = 
all_accts['quant'].sum()\n", + " \n", + " # get all allowances in alloc_hold, inst_cat=='cap', for specified vintage and juris\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='alloc_hold'\n", + " mask2 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask3 = all_accts.index.get_level_values('inst_cat')=='cap'\n", + " mask4 = all_accts.index.get_level_values('juris')==juris\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " \n", + " to_transfer = all_accts.loc[mask]\n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~\n", + " # for QC, set aside portion for full true-up (as initial estimated)\n", + " # first true-ups are distributed in Q3 of year after the initial allocation\n", + " if juris == 'QC':\n", + " # split to_transfer into two parts: set_aside_for_alloc and to_transfer\n", + " # will work only if:\n", + " # 1. to_transfer df is only 1 row\n", + " # 2. (cq.date.year, f\"{cq.date.year+1}Q1\") is unique in QC_alloc_full_proj\n", + " \n", + " # look up quantity that's set aside for full alloc\n", + " set_aside_for_alloc_quant = prmt.QC_alloc_full_proj.at[(cq.date.year, f\"{cq.date.year}Q1\"), 'quant']\n", + " \n", + " # create set_aside_for_alloc (df with 1 row)\n", + " set_aside_for_alloc = to_transfer.copy()\n", + " set_aside_for_alloc.at[set_aside_for_alloc.index[0], 'quant'] = set_aside_for_alloc_quant\n", + " \n", + " # update value in to_transfer\n", + " to_transfer_init = to_transfer.at[to_transfer.index[0], 'quant']\n", + " to_transfer.at[to_transfer.index[0], 'quant'] = to_transfer_init - set_aside_for_alloc_quant\n", + " \n", + " else: \n", + " # juris != 'QC'\n", + " set_aside_for_alloc = prmt.standard_MI_empty.copy()\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~ \n", + " \n", + " # update metadata for state_alloc_hold, to put into auct_hold & turn into state-owned allowances\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 
'inst_cat': juris, \n", + " 'auct_type': 'current',\n", + " 'newness': 'new', \n", + " 'status': 'not_avail'}\n", + " to_transfer = multiindex_change(to_transfer, mapping_dict)\n", + " \n", + " all_accts = pd.concat([to_transfer, set_aside_for_alloc, remainder], sort=False) \n", + " \n", + " dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " \n", + " if dups.empty==False:\n", + " # there are duplicated indices; need to do groupby sum\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cur_upsample_avail_state_owned_projection(all_accts, juris):\n", + " \"\"\"\n", + " For current auctions in a given year, sums newly available and advance unsold; upsamples and assigns date_level.\n", + " \n", + " This is idealized method for projections, in which one-quarter of annual total is available in each auction.\n", + " \n", + " Operates on specified juris, to keep current allowances of each jurisdiction separate.\n", + " \n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " 
dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " if dups.empty==False:\n", + " # there are duplicated indices; need to do groupby sum\n", + " print(\"Warning\" + \"! There are duplicated indices; need to do groupby sum.\")\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " # get all current allowances in auct_hold, for specified vintage and juris\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('juris')==juris\n", + " mask3 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask4 = all_accts.index.get_level_values('auct_type')=='current'\n", + " # select state-owned allowances; those with inst_cat same as juris\n", + " mask5 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " auct_hold = all_accts.loc[mask]\n", + " not_auct_hold = all_accts.loc[~mask]\n", + " \n", + " if prmt.run_tests == True:\n", + " # tests: check that selection above has 'status'=='not_avail' & 'newness'=='new'\n", + " if auct_hold.index.get_level_values('newness').unique().tolist() != ['new']:\n", + " print(f\"{prmt.test_failed_msg} auct_hold had entries with newness != 'new'\")\n", + " if auct_hold.index.get_level_values('status').unique().tolist() != ['not_avail']:\n", + " print(f\"{prmt.test_failed_msg} auct_hold had entries with status != 'not_avail'\")\n", + " \n", + " # take total from above, divide by 4 to get average annual quantity\n", + " each_quarter = auct_hold / 4\n", + " \n", + " # create empty df; quarterly quantities with metadata for each quarter will be put into this df\n", + " upsampled_to_q = 
prmt.standard_MI_empty.copy()\n", + " \n", + " for quarter in [1, 2, 3, 4]:\n", + " one_quarter_date = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " one_quarter = each_quarter.copy()\n", + " mapping_dict = {'date_level': one_quarter_date}\n", + " one_quarter = multiindex_change(one_quarter, mapping_dict)\n", + " upsampled_to_q = pd.concat([upsampled_to_q, one_quarter])\n", + "\n", + " all_accts = upsampled_to_q.append(not_auct_hold)\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def upsample_advance_all_accts(all_accts):\n", + " \"\"\"\n", + " Takes annual quantities set aside for advance, upsamples to get quarterly quantities to be made available.\n", + " \n", + " Specifies date_level for each quarter, but does *not* assign status 'available'.\n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # select advance allowances to be upsampled\n", + " # when cq.date.year >= 2013, \n", + " # then upsample vintage = cq.date.year + 3\n", + " \n", + " mask1 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " mask2 = all_accts.index.get_level_values('vintage')==(cq.date.year+3)\n", + " mask3 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " # specifying acct_name=='auct_hold' shouldn't be necessary, but doesn't 
hurt\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " \n", + " adv_to_upsample = all_accts.loc[mask] \n", + " \n", + " each_quarter = adv_to_upsample / 4\n", + " mapping_dict = {'status': 'not_avail'}\n", + " each_quarter = multiindex_change(each_quarter, mapping_dict)\n", + " \n", + " all_quarters = prmt.standard_MI_empty.copy()\n", + " for quarter in [1, 2, 3, 4]:\n", + " one_quarter_date = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " one_quarter = each_quarter.copy() \n", + " mapping_dict = {'date_level': one_quarter_date}\n", + " one_quarter = multiindex_change(one_quarter, mapping_dict)\n", + " all_quarters = pd.concat([all_quarters, one_quarter], sort=True)\n", + "\n", + " # recombine:\n", + " all_accts = pd.concat([all_quarters, all_accts.loc[~mask]], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: conservation of allowances just for upsampled part\n", + " diff = all_quarters['quant'].sum() - adv_to_upsample['quant'].sum()\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in upsample_advance_all_accts.\")\n", + " # END OF TEST\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def adv_unsold_to_cur_all_accts(all_accts):\n", + " \"\"\"\n", + " Function similar to adv_unsold_to_cur, but operating on all_accts.\n", + " \n", + " Takes any unsold allowances from advance auctions that are in auct_hold account,\n", + " and updates metadata to change them into current auction allowances.\n", + " \n", + " Sums any unsold 
across all quarters in a calendar year, \n", + " (which will become part of total state-owned allowances to be made available in current auctions).\n", + " \n", + " Based on regulations:\n", + " CA: [CA regs Oct 2017], § 95911(f)(3)(B) & (D)\n", + " QC: [QC regs Sep 2017], Section 54 (paragraph 2)\n", + " ON: [ON regs Jan 2018], Section 58(4)2\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # isolate allowances unsold at advance auctions\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " mask3 = all_accts.index.get_level_values('status')=='unsold'\n", + " mask4 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4)\n", + " unsold_adv = all_accts.loc[mask]\n", + " all_accts_remainder = all_accts.loc[~mask]\n", + " \n", + " # update metadata in all_accts, creating adv_redes_to_cur\n", + " adv_redes_to_cur = unsold_adv.copy()\n", + " mapping_dict = {'auct_type': 'current', \n", + " 'newness': 'new', \n", + " 'status': 'not_avail',\n", + " 'date_level': prmt.NaT_proxy, \n", + " 'unsold_di': prmt.NaT_proxy, \n", + " 'unsold_dl': prmt.NaT_proxy}\n", + " adv_redes_to_cur = multiindex_change(adv_redes_to_cur, mapping_dict)\n", + " \n", + " # groupby sum to combine all unsold from advance auctions of a particular vintage\n", + " adv_redes_to_cur = adv_redes_to_cur.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine adv_redes_to_cur with remainder\n", + " all_accts = pd.concat([adv_redes_to_cur, all_accts_remainder], sort=True)\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, 
parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def unsold_update_status(all_accts):\n", + " \"\"\"\n", + " Operates after auction, on any allowances still remaining in auct_hold with date_level == cq.date.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # for unsold, update metadata:\n", + " # in all_accts, get any allowances remaining in auct_hold with date_level == cq.date\n", + " # separate them out\n", + " # change metadata: change newness to unsold & status to not_avail\n", + " # recombine with remainder of all_accts\n", + " unsold_mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " unsold_mask2 = all_accts.index.get_level_values('date_level')==cq.date\n", + " unsold_mask3 = all_accts['quant'] > 0\n", + " unsold_mask = (unsold_mask1) & (unsold_mask2) & (unsold_mask3)\n", + " all_accts_unsold = all_accts.loc[unsold_mask]\n", + " all_accts_remainder = all_accts.loc[~unsold_mask]\n", + " \n", + " if all_accts_unsold['quant'].sum() > 0:\n", + " # update metadata in all_accts_unsold\n", + " mapping_dict = {'status': 'unsold', \n", + " 'unsold_dl': cq.date}\n", + " all_accts_unsold = multiindex_change(all_accts_unsold, mapping_dict)\n", + " \n", + " # separate those with an unsold_di != prmt.NaT_proxy from those with unsold_di == prmt.NaT_proxy\n", + " # for those with unsold_di == prmt.NaT_proxy (never unsold before), set new value of unsold_di to be cq.date; \n", + " # for the rest (were unsold before), don't change unsold_di\n", + " unsold_before_mask = 
all_accts_unsold.index.get_level_values('unsold_di')==prmt.NaT_proxy\n", + " unsold_di_NaT = all_accts_unsold.loc[unsold_before_mask]\n", + " mapping_dict = {'unsold_di': cq.date}\n", + " unsold_di_NaT = multiindex_change(unsold_di_NaT, mapping_dict)\n", + " \n", + " unsold_di_not_NaT = all_accts_unsold.loc[~unsold_before_mask]\n", + " \n", + " # recombine all_accts_remainder (above), unsold_di_NaT & unsold_di_not_NaT\n", + " all_accts = pd.concat([all_accts_remainder, \n", + " unsold_di_NaT,\n", + " unsold_di_not_NaT], sort=False)\n", + " \n", + " elif all_accts_unsold['quant'].sum() == 0.0:\n", + " pass\n", + " \n", + " else: # all_accts_unsold['quant'].sum() is neither > 0 nor == 0; is it negative? NaN?\n", + " print(\"Error\" + \"! all_accts_unsold['quant'] should be a float that's either zero or positive.\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, juris):\n", + " \"\"\"\n", + " Force unsold allowances to be reintro (made available again) to reflect anomalies in historical data.\n", + " \n", + " Right now, only set up for QC.\n", + " \n", + " Anomalies are transfers from alloc_hold to auct_hold for vintages < cq.date.year.\n", + " \n", + " (Normal transfers, in \"historical\" version of this function, are for vintage == cq.date.year.)\n", + " \n", + " Occurs at the start of each year (Jan 1).\n", + " \n", + " Transfers specified allowances from alloc_hold to auct_hold.\n", + " \n", + " Operates only for 
newly available allowances, as specified manually in input file sheet 'quarterly auct hist'.\n", + " \n", + " Operates only for state-owned allowances.\n", + " \n", + " Note: There is a separate fn for making the allowances in auct_hold available.\n", + "\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get the historical data for those allowances with vintages < date_level year\n", + " df = prmt.qauct_new_avail.copy()\n", + " hist_to_be_avail_anom = df.loc[(df.index.get_level_values('inst_cat')==juris) & \n", + " (df.index.get_level_values('auct_type')=='current') &\n", + " (df.index.get_level_values('date_level')==cq.date) & \n", + " (df.index.get_level_values('vintage')0]\n", + " all_accts_pos = pd.concat([all_accts_pos, to_remove, to_transfer], sort=True)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " else: # hist_to_be_avail_anom.empty == True:\n", + " pass\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_VRE_retired_from_CIR():\n", + " \"\"\"\n", + " For Voluntary Renewable Electricity (VRE) allowances.\n", + " \n", + " Quantities 
retired inferred from quarter-to-quarter decreases in VRE account, as shown in CIR.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # as of the latest data available in the version of the Compliance Instrument Report fed in above\n", + " VRE = prmt.CIR_historical[['Voluntary Renewable Electricity']]\n", + " VRE = VRE.xs('vintaged allowances', level='Description')\n", + "\n", + " # rearrange to be able to calculate annual change (diff); negative of annual change is what was retired from VRE\n", + " VRE = VRE.unstack(0)\n", + " VRE.columns = VRE.columns.droplevel(0)\n", + " VRE.columns.name = 'CIR_date'\n", + " VRE.index.name = 'vintage'\n", + "\n", + " VRE_retired = VRE.T.diff().T * -1\n", + " VRE_retired = VRE_retired.replace(-0.0, np.NaN)\n", + " VRE_retired = VRE_retired.dropna(how='all')\n", + " VRE_retired = VRE_retired.T.dropna()\n", + " VRE_retired = VRE_retired.stack()\n", + " VRE_retired.name = 'quant'\n", + "\n", + " # convert to df\n", + " VRE_retired = pd.DataFrame(VRE_retired)\n", + " \n", + " # set object attribute\n", + " prmt.VRE_retired = VRE_retired\n", + " \n", + " # no return; func sets object attribute prmt.VRE_retired" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## HINDCAST FUNCTIONS UNIQUE TO QC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_QC_auctions_2013Q4(all_accts):\n", + " \"\"\"\n", + " 2013Q4 was the first QC auction (and first for any juris in WCI market).\n", + " \n", + " It was anomalous, in that:\n", + " 1. There was only this single auction in 2012.\n", + " 2a. The 2013Q4 current auction had approximately a full year's worth of allowances available at once.\n", + " 2b. However, the current auction quantity was not all the allowances leftover after allocations were distributed.\n", + " 3. 
The 2013Q4 advance auction had available all vintage 2016 allowances at once.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # all_accts (for QC) starts empty\n", + " \n", + " # create QC allowances v2013-v2020, put into alloc_hold\n", + " all_accts = create_annual_budgets_in_alloc_hold(all_accts, prmt.QC_cap.loc[2013:2020])\n", + "\n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " logging.info(f\"total allowances created: {all_accts_sum_init}\")\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # transfer APCR allowances out of alloc_hold, into APCR_acct (for vintages 2013-2020)\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.QC_APCR_MI, 2013, 2020)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # QC regs (s. 38): all allowances not put in reserve account are transferred into \"Minister's allocation account\"\n", + " # then allocations and auction quantities go from there to other accounts\n", + " # in model, can leave all allowances in alloc_hold, and move allocations and auction quantities as specified\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # transfer advance into auct_hold\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.QC_advance_MI, 2013, 2020)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # transfer alloc out of alloc_hold to ann_alloc_hold\n", + " \n", + " # initial 75% of estimated alloc v2013 transferred in 2013Q4, before Q4 auction\n", + " # (this was later than what would be the usual pattern in later years)\n", + " \n", + " # (appropriate metadata is assigned to each set of allowances by convert_ser_to_df_MI_alloc) \n", + " # convert each alloc Series into df with MultiIndex\n", + " # then do the transfer for single vintage, cq.date.year+1\n", + " # (the 
to_acct is already specified in metadata for each set of allowances)\n", + " \n", + " all_accts = transfer_QC_alloc_init__from_alloc_hold(all_accts) \n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CURRENT AUCTION:\n", + " # look up quantity available in 2013Q4 auction; \n", + " # move that quantity from alloc_hold to auct_hold\n", + " # specify from vintage 2013\n", + " # note: the split between historical and projected data occurs within the fn below\n", + " # (so it is different than CA, which uses two different functions)\n", + "\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, 'QC')\n", + " all_accts = cur_upsample_avail_state_owned_historical(all_accts, 'QC')\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # ADVANCE AUCTION (2013Q4 anomaly):\n", + " # remember: all vintage 2016 allowances were available at once in this auction\n", + " # get all allowances aside for advance in auct_hold that are vintage 2016 (cq.date.year+3)\n", + " adv_new_mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " adv_new_mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " adv_new_mask3 = all_accts.index.get_level_values('vintage')==(cq.date.year+3)\n", + " adv_new_mask = (adv_new_mask1) & (adv_new_mask2) & (adv_new_mask3)\n", + " adv_new = all_accts.loc[adv_new_mask]\n", + " all_accts_remainder = all_accts.loc[~adv_new_mask]\n", + " \n", + " # for this anomalous advance auction, all of these allowances were available in one auction\n", + " # update metadata: change 'date_level' to '2013Q4'\n", + " # (later, metadata will be changed to available by function QC_state_owned_make_available)\n", + " mapping_dict = {'date_level': quarter_period('2013Q4')}\n", + " adv_new = multiindex_change(adv_new, mapping_dict)\n", + " \n", + " # recombine to create new version of all_accts\n", + " all_accts = pd.concat([adv_new, all_accts_remainder], sort=False)\n", + "\n", + " # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, 'QC', parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def QC_state_owned_make_available(all_accts, auct_type):\n", + " \"\"\"\n", + " For any allowances in auct_hold, with date_level == cq.date, fn changes status to 'available'.\n", + " \n", + " Works for current auction and advance auction, as specified by argument auct_type.\n", + " \n", + " Currently works for QC-only auctions, which never met conditions for redesignation of unsold allowances.\n", + " \n", + " Need to rework the function to work for linked auctions.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test: conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " if auct_type in ['advance', 'current']:\n", + " # get allowances in auct_hold, for specified auct_type, for date_level == cq.date\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('auct_type')==auct_type\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " \n", + " # to be available\n", + " avail_1q = all_accts.loc[mask]\n", + " \n", + " # update status to 'available'\n", + " mapping_dict = {'status': 'available'}\n", + " avail_1q = multiindex_change(avail_1q, mapping_dict)\n", + "\n", + " # combine avail 
with remainder (~mask)\n", + " all_accts = avail_1q.append(all_accts.loc[~mask])\n", + " \n", + " else: # auct_type not 'advance' or 'current'\n", + " pass\n", + " \n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: for conservation of allowances\n", + " diff = all_accts['quant'].sum() - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in {inspect.currentframe().f_code.co_name}.\")\n", + " print(\"(Test using initial and final values within fn.)\")\n", + " print(f\"Was for auct_type: {auct_type}; diff is: {diff}\")\n", + " else:\n", + " pass\n", + " # END OF TEST\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, 'QC', parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def QC_early_action_distribution(all_accts):\n", + " \"\"\"\n", + " Create Early Action allowances and distribute them to gen_acct.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # for Early Action, use vintage 2199 as proxy non-vintage allowances\n", + " # this keeps Early Action separate from APCR (vintage 2200)\n", + " # data hard-coded into model, because one-off event\n", + " ser = pd.Series({2199: 2.040026})\n", + "\n", + " df = pd.DataFrame(ser)\n", + " df.columns = ['quant']\n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + "\n", + " df['acct_name'] = 'gen_acct'\n", + " df['auct_type'] = 'n/a'\n", + " df['juris'] = 'QC'\n", + "\n", + " # acct_name set 
above\n", + " df['date_level'] = prmt.NaT_proxy\n", + " # juris set above\n", + " # vintage set above\n", + " df['inst_cat'] = 'early_action'\n", + " # auct_type set above\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + "\n", + " df = df.set_index(prmt.standard_MI_names)\n", + "\n", + " QC_early_action = df \n", + " \n", + " all_accts = all_accts.append(QC_early_action)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_QC_alloc_init__from_alloc_hold(all_accts):\n", + " \"\"\"\n", + " Moves allocated allowances from alloc_hold into private accounts (gen_acct).\n", + " \n", + " Runs at the end of each year (except for in anomalous years).\n", + " \n", + " Only processes one vintage at a time; vintage is cq.date.year + 1 (except for in anomalous years).\n", + " \n", + " (If there's an anomalous transfer, i.e., for QC in 2013, would need to change this.\n", + " Could add if statement: if cq.date=='2013Q4', then for QC, do XYZ.)\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # convert QC_alloc_initial (df) into QC_alloc_i (ser)\n", + " QC_alloc_i = prmt.QC_alloc_initial.copy()\n", + " QC_alloc_i.index = QC_alloc_i.index.droplevel('allocation quarter')\n", + " QC_alloc_i = QC_alloc_i['quant']\n", + " \n", + " QC_alloc_i_1v = QC_alloc_i.loc[cq.date.year:cq.date.year]\n", + " QC_alloc_i_1v.name = f'QC_alloc'\n", + " \n", + " QC_alloc_i_1v_MI = convert_ser_to_df_MI_QC_alloc(QC_alloc_i_1v)\n", + "\n", + " QC_alloc_i_1v_MI.name = 'QC_alloc_initial'\n", + " \n", + " to_acct_MI_1v = QC_alloc_i_1v_MI\n", + " \n", + " # create df 
named remove, which is negative of to_acct_MI; rename column\n", + " remove = -1 * to_acct_MI_1v\n", + " # remove.columns = ['remove_quant']\n", + "\n", + " # set indices in remove df (version of to_acct_MI) to be same as from_acct\n", + " mapping_dict = {'acct_name': 'alloc_hold', \n", + " 'inst_cat': 'cap', \n", + " 'date_level': prmt.NaT_proxy}\n", + " remove = multiindex_change(remove, mapping_dict)\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_duplicated_indices(remove, parent_fn)\n", + "\n", + " # separate out any rows with negative values\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " \n", + " # combine dfs to subtract from from_acct & add to_acct_MI_1v\n", + " # (groupby sum adds the positive values in all_accts_pos and the neg values in remove)\n", + " all_accts_pos = pd.concat([all_accts_pos, remove, to_acct_MI_1v], sort=True)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine pos & neg\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_QC_alloc_trueups__from_alloc_hold(all_accts):\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " QC_alloc_trueups = prmt.QC_alloc_trueups\n", + " \n", + " # pre-test: 
conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get all the allocation true-ups that occur in a particular quarter \n", + " QC_alloc_trueup_1q = QC_alloc_trueups.loc[QC_alloc_trueups.index.get_level_values('allocation quarter')==cq.date]\n", + " \n", + " if len(QC_alloc_trueup_1q) > 0:\n", + " # there are true-ups to process for this quarter\n", + "\n", + " # from QC_alloc_trueup_1q, get all the emission years that there are true-ups for\n", + " emission_years = QC_alloc_trueup_1q.index.get_level_values('allocation for emissions year').tolist()\n", + "\n", + " # iterate through all emission years that had trueups in cq.date\n", + " for emission_year in emission_years:\n", + " # initialize transfer_accum; resets for each emission_year\n", + " transfer_accum = prmt.standard_MI_empty.copy()\n", + " \n", + " # get quantity of true-ups for specified emissions year (returns a Series)\n", + " QC_alloc_trueup_1q_1y = QC_alloc_trueup_1q.xs(emission_year, \n", + " level='allocation for emissions year', \n", + " drop_level=True)\n", + " \n", + " # initialize un-accumulator: quantity remaining of true-ups to transfer\n", + " trueup_remaining = QC_alloc_trueup_1q_1y['quant'].sum()\n", + " \n", + " # ***** SPECIAL CASE *****\n", + " # hard code anomaly in 2016Q3, in which APCR allowances were apparently used \n", + " # for true-up alloc for emission year 2015\n", + " if cq.date == quarter_period('2016Q3') and emission_year == 2015: \n", + " # transfer 0.826677 M from QC APCR to gen_acct\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='APCR_acct'\n", + " mask2 = all_accts.index.get_level_values('juris')=='QC'\n", + " mask = (mask1) & (mask2)\n", + " QC_ACPR_acct = all_accts.loc[mask]\n", + " \n", + " APCR_for_trueup_quant = 0.826677 # units: MMTCO2e\n", + " inst_cat_new_name = f\"QC_alloc_2015_APCR\" # used to update metadata below\n", + " \n", + " # create trueup_transfers df\n", + " trueup_transfers = 
QC_ACPR_acct.copy()\n", + " \n", + " # update quantity:\n", + " if len(trueup_transfers) == 1:\n", + " trueup_transfers.at[trueup_transfers.index[0], 'quant'] = APCR_for_trueup_quant\n", + " else:\n", + " print(\"Error\" + \"! There was more than one APCR row; method above for setting value doesn't work.\")\n", + " \n", + " # create remove df:\n", + " remove = -1 * trueup_transfers.copy()\n", + " \n", + " # update metadata for trueup_transfers; record allocation distribution date as 'date_level'\n", + " mapping_dict = {'acct_name': 'gen_acct', \n", + " 'inst_cat': inst_cat_new_name, \n", + " 'date_level': cq.date}\n", + " trueup_transfers = multiindex_change(trueup_transfers, mapping_dict)\n", + " \n", + " # do groupby sum of pos & neg, recombine\n", + " # concat all_accts, trueup_transfers, remove\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7]\n", + " all_accts_pos = pd.concat([all_accts_pos, trueup_transfers, remove], sort=True)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " # update trueup_remaining, for use in regular processing below\n", + " trueup_remaining = trueup_remaining - APCR_for_trueup_quant\n", + " \n", + " else:\n", + " pass\n", + " # ***** SPECIAL CASE *****\n", + " \n", + " \n", + " # if positive true-ups (usual case)\n", + " if trueup_remaining > 0:\n", + " \n", + " # TEST: QC_alloc_trueup_1q_1y should only be a single row\n", + " if len(QC_alloc_trueup_1q_1y) != 1:\n", + " print(f\"{prmt.test_failed_msg} QC_alloc_trueup_1q_1y did not have 1 row. 
Here's QC_alloc_trueup_1q_1y:\")\n", + " print(QC_alloc_trueup_1q_1y)\n", + " # END OF TEST\n", + "\n", + " # get allowances in alloc_hold, for juris == 'QC', and vintage >= emission_year\n", + " # but exclude allowances set aside for advance auctions\n", + " mask1 = all_accts.index.get_level_values('juris')=='QC'\n", + " mask2 = all_accts.index.get_level_values('acct_name')=='alloc_hold'\n", + " mask3 = all_accts.index.get_level_values('inst_cat')=='cap'\n", + " mask4 = all_accts.index.get_level_values('vintage')>=emission_year\n", + " mask5 = all_accts['quant'] > 0\n", + " all_accts_trueup_mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " trueup_potential = all_accts.loc[all_accts_trueup_mask].sort_values(by='vintage')\n", + " \n", + " # try to draw from alloc_hold allowances of vintage == emission_year\n", + " # if not enough allowances, go to the next vintage\n", + " \n", + " # create df of those transferred:\n", + " # copy whole df trueup_potential, zero out values, then set new values in loop\n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " trueup_potential = trueup_potential.sort_index()\n", + " \n", + " trueup_transfers = trueup_potential.copy() \n", + " trueup_transfers['quant'] = float(0)\n", + " # note: trueup_transfers winds up with zero rows because it is not built up from appending rows\n", + " # it is updated by setting new values as needed\n", + " \n", + " for row in trueup_potential.index:\n", + " potential_row_quant = trueup_potential.at[row, 'quant']\n", + " trueup_to_transfer_quant = min(potential_row_quant, trueup_remaining)\n", + "\n", + " # update un-accumulator for jurisdiction\n", + " trueup_remaining = trueup_remaining - trueup_to_transfer_quant\n", + "\n", + " # update trueup_potential\n", + " trueup_potential.at[row, 'quant'] = potential_row_quant - trueup_to_transfer_quant\n", + "\n", + " # update trueup_transfers\n", + " trueup_transfers.at[row, 'quant'] = trueup_to_transfer_quant\n", + " 
\n", + " # update metadata for transferred allowances\n", + " # record date of allocation in index level 'date_level'\n", + " mapping_dict = {'acct_name': 'gen_acct',\n", + " 'inst_cat': f'QC_alloc_{emission_year}', \n", + " 'date_level': cq.date}\n", + " trueup_transfers = multiindex_change(trueup_transfers, mapping_dict)\n", + " \n", + " # recombine transfer_accum, trueup_potential (what's remaining), and the rest of all_accts\n", + " all_accts = pd.concat([trueup_transfers, \n", + " trueup_potential, \n", + " all_accts[~all_accts_trueup_mask]], sort=False)\n", + " \n", + " # do groupby sum of pos & neg, recombine\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " # if negative true-ups\n", + " elif trueup_remaining < 0:\n", + " # print(f\"There was negative true-up of {trueup_remaining} M; negative true-ups not implemented currently.\")\n", + " # see boneyard for old code on negative true-ups\n", + " pass\n", + " \n", + " else:\n", + " # closing if trueup_remaining > 0, elif trueup_remaining < 0\n", + " print(f\"No QC true-ups processed; show QC_alloc_trueup_1q_1y: {QC_alloc_trueup_1q_1y}.\")\n", + " pass \n", + "\n", + " # end of \"for emission_year in emission_years:\"\n", + " \n", + " else:\n", + " # closing \"if len(QC_alloc_trueup_1q) > 0:\"\n", + " pass\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, 'QC_alloc')\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " \n", + " # 
after end of \"for emission_year in emission_years:\"\n", + " return(all_accts)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### end of functions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# REGULATIONS" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "## CA cap (divided into different types)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "### CA advance\n", + "* set in regs § 95870(b) & § 95871(b): for all years 2015-2031, 10% of cap\n", + "* allowances are offered in advance auctions held 3 years prior to their vintage\n", + "* started with vintage 2015 allowances offered in 2012 (first auction, held 2012-11)\n", + "* for budget years 2013-2020: \"Upon creation of the Auction Holding Account, the Executive Officer shall transfer 10 percent of the allowances from budget years 2015-2020 to the Auction Holding Account.\n", + "* for budget years 2021-2030: \"The Executive Officer shall transfer 10 percent of the allowances from budget years 2021 and beyond to the Auction Holding Account.\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "### CA Voluntary Renewable Electricity Reserve Account (aka VRE_reserve)\n", + "* set in regs § 95870(c): 0.5% of cap for 2013-2014, 0.25% of cap for 2015-2020\n", + "* there are no Voluntary Renewable Electricity Reserve Account allowances after 2020\n", + "* these are counted as non-vintage allowances, but can keep them here with year of cap they derive from\n", + "* these are not used toward compliance obligations, so once they are created, they are permanently removed from C&T supply\n", + "* for accounts: \"Upon creation of the Voluntary Renewable Electricity Reserve Account, the Executive Officer shall transfer allowances to the Voluntary Renewable Electricity Reserve Account...\"" + ] + }, + { 
+ "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "### CA allocations:\n", + "* note: in the model, CA is the only jurisdiction with various types of allocations specified\n", + "* so industrial_alloc, etc., are only for CA\n", + "* for QC & ON, simply have alloc_QC and alloc_ON\n", + "* for accounts: \"The Executive Officer will allocate to the limited use holding account and allowance allocation holding account pursuant to sections 95892(b) and 95831(a)(6) for each electrical distribution utility by October 24 of each calendar year from 2013-2019 for allocations from 2014-2020 annual allowance budgets.\"\n", + " * so only move out of alloc_hold on that date in each year" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Electrical Distribution Utility Sector Allocation (CA)\n", + "2013-2020: § 95870(d)(1), with details specified in regs § 95892(a)(1) & § 95892(a)(2): \n", + "* allocation = [base value] * [annual cap adjustment factors]\n", + " * base value: in 95870(d)(1), specified as 97.7 million allowances per year\n", + " * annual cap adjustment factors: in 95891 Table 9-2: Cap Adjustment Factor, c, Standard Activities\n", + " * percentage of elec alloc for each utility in 95892 Table 9-3\n", + " * details by utility in: https://www.arb.ca.gov/cc/capandtrade/allowanceallocation/edu-ng-allowancedistribution/electricity-allocation.xlsx\n", + "\n", + "ARB's numbers also in this pdf:\n", + "\"Annual Allocation to Electrical Distribution Utilities (EDU) under the Cap-and-Trade Regulation (Regulation) Rev. 2/4/2015\"\n", + "https://www.arb.ca.gov/cc/capandtrade/allowanceallocation/edu-ng-allowancedistribution/electricity-allocation.pdf\n", + "\n", + "(Note there was an earlier version Rev. 8/1/2014, but the sums there were off significantly from the total electricity allocation. 
Perhaps they made an error, which they corrected with 2/4/2015 version.)\n", + "\n", + "However, both of the pdfs say: \"This list neither substitutes for nor supplements the provisions of the Regulation and is intended to provide information about allowance allocation to EDUs based on the best available information as of the creation date indicated in the header. This is not a regulatory document.\"\n", + "\n", + "Danny's numbers matched 2015 version of pdf above.\n", + "\n", + "When I calculate based on the regs (including percentages in Table 9-3), and ROUND UP the allocation for each utility, then I get the same numbers as the totals in ARB's 2015 version, except that ARB's totals are 1 allowance (1 tCO2e) higher for 2019 & 2020.\n", + "\n", + "Anyway, use ARB's numbers (in 2017 Excel file) because that's what they published, they're close enough, and we won't know what they actually allocate for 2019 & 2020 for a while." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "2021-2030 Electrical Distribution Utility Sector Allocation (IOU & POU):\n", + "* 2021-2030: § 95871(c)(1)\n", + "* details determined by 95892(a), with allocation quantities explicitly stated in § 95892 Table 9-4\n", + "* (data copied from pdf (opened in Adobe Reader) into Excel; saved in input file)\n", + "* but utilities not identified in Table 9-4 as IOU or POU\n", + "* so merge with 2013-2020 df, and then compute sums for whole time span 2013-2030\n", + "* (also note this does not go through 2031, as cap does)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Natural Gas Supplier Sector\n", + "for 2015-2020, § 95870(h); method described in § 95893 [specifically § 95893(a)]:\n", + "* note: natural gas supplier allocations only began in 2015\n", + "* allowances = [emissions in 2011] * [annual adjustment factor for natural gas]\n", + " * emissions: calculated per methods in 
§95852(c):\n", + " * \"Suppliers of Natural Gas. A supplier of natural gas covered under § 95811(c) and § 95812(d) has a compliance obligation for every metric ton CO2e of GHG emissions that would result from full combustion or oxidation of all fuel delivered to end users in California...\"\n", + " * annual adjustment factor for natural gas is the CA_cap_adjustment_factor\n", + "\n", + "for 2021 and beyond, § 95871(g)\n", + "* method is \"pursuant to sections § 95893(b) and § 95831(a)(6)\"\n", + "* § 95893(b): if entities don't specify consignment amount, full allocation will be consigned\n", + "* allocation quantities specified by § 95893(a), which is the same for 2015-2020 as for 2021-2030\n", + "\n", + "problem:\n", + "* the category of emissions specified in § 95852(c) don't seem to reported anywhere on their own (i.e, in MRR)\n", + "* regulations don't state what the value was in 2011\n", + "\n", + "solution: \n", + "* infer what the emissions in 2011 were that ARB used in its calculations\n", + "* emissions in 2011 = reported allocations for year X / adjustment factor for year X\n", + "* using this equation, can calculate emissions in 2011 from the allocation and adjustment factor for any particular year\n", + "* double-checked results by comparing multiple years (i.e., X = 2015, 2016, 2017, 2018)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "# note: method above very closely recreated historical values, \n", + "# but was sometimes off by 1 allowance (one time high, one time low)\n", + "\n", + "# for 2015, was exactly right (45.356999)\n", + "# for 2016, gave 44.444094 instead of historical value of 44.444093\n", + "# for 2017, gave 43.579236 instead of historical value of 44.579237\n", + "# for 2018, was exactly right (42,666,330)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "### CA allocation: Industrial & other\n", + 
"Following ARB's approach in 2018-03-02 workshop presentation (slide 9), group allocations into \"industrial & other\"\n", + "\n", + "Other category includes:\n", + "* Public Wholesale Water Agencies\n", + "* University Covered Entities and Public Service Facilities\n", + "* Legacy Contract Generators (already included with Industrial, from 2018 allocation report)\n", + "* thermal output\n", + "* waste-to-energy\n", + "* LNG suppliers\n", + "\n", + "Then after the latest historical data for allocations, use ARB's projection for what allocations would be for the \"industrial & other\" category, if assistance factors in 2018-2020 were retained at 50%/75% for high/medium risk sub-sectors.\n", + "\n", + "In that projection, assistance factors are 100% for years 2021-2030, so that projection shows their expectation for allocations, excluding any true-ups to make up (retroactively) for lower assistance factors 2018-2020." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Industrial\n", + "* (variable allocation)\n", + "* rules for calculating for 2013-2020: § 95870(e)\n", + "* rules for calculating for 2021 and beyond: § 95871(d)\n", + "* more details in § 95891. 
ALLOCATION FOR INDUSTRY ASSISTANCE\n", + "\n", + "notes on assistance factors:\n", + "* in 2018, assistance factors followed regs as of Oct 2017, in Table 8-1:\n", + " * sub-sectors with \"low\" leakage risk were assigned industry assistance factor of 50%\n", + " * sub-sectors with \"medium\" leakage risk were assigned industry assistance factor of 75%\n", + "* in 2018, ARB proposed to raise assistance factors for all sub-sectors to 100%, including making retroactive allocations\n", + "* model uses current regulations, and actual allocation for 2018\n", + "\n", + "other notes:\n", + "* in 2018 allocation report, legacy contract generation allocation included with industrial allocation\n", + "* in 2017, legacy contract generation allocation was 0.37 MMTCO2e, and had been decreasing\n", + "* in 2018, this allocation would be < 1% of the category industrial allocation + legacy contract allocation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### Allocation to Public Wholesale Water Agencies:\n", + "(fixed allocation)\n", + "* 2013-2020: § 95870(d)(2)\n", + " * details in § 95895(a) and § 95895 Table 9-7: \"Allocation to Each Public Wholesale Water Agency\" [2015-2020]\n", + " \n", + " \n", + "* 2021 and beyond: § 95871(c)(2)\n", + " * details in § 95895(b)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Allocation to University Covered Entities and Public Service Facilities\n", + "* (variable allocation)\n", + "* 2013-2020: § 95870(f)\n", + "* 2021 and beyond: § 95871(e)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Allocation to Legacy Contract Generators\n", + "* (variable allocation)\n", + "* 2013-2020: § 95870(g)\n", + "* 2021 and beyond: § 95871(f)\n", + "* recall that for 2018, this allocation was included in industrial allocation" + ] + }, + { + "cell_type": "markdown", + 
"metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: thermal output\n", + "\n", + "full name, as listed in annual allocation reports: \n", + "\n", + "\"Allocation to Facilities with Limited Exemption of Emissions from the Producton of Qualified Thermal Output\"\n", + "\n", + "Related: § 95852(j) \"Limited Exemption of Emissions from the Production of Qualified Thermal Output.\"\n", + "* \"From 2013 through the year before which natural gas suppliers are required to consign 100% of allocated allowances to auction pursuant to Table 9-5 or 9-6...\" \n", + "* Note: natural gas suppliers have to consign 100% in 2030 and beyond\n", + "* regs don't specify exact amount, except that it's zero from 2030 onward\n", + "* values 2015-2016 from annual allocation reports; no values 2013-2014, nor 2017-2018\n", + "* NZ INTERNAL: see Trello card for this; it's about thermal output & waste-to-energy (https://trello.com/c/F10WGo7z)\n", + "\n", + "Mason's reading (May 2018): \n", + "* This appears to have been a limited-time allocation. Model assumes it's zero from 2017 onward.\n", + "* Annual allocation report had allocations for \"qualified thermal output\" of vintage 2015 (for 2013 emissions) and vintage 2016 allowances (for 2014 emissions), as stated on first page of allocations reports. \n", + "* These allocations may have been to satisfy emissions obligations incurred under an earlier version of the regulations that did not include this exemption." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: Waste-to-Energy\n", + "* (variable allocation)\n", + "* regs don't specify exact amount; values 2015-2018 from annual allocation reports\n", + "* note that 2018 value is the sum of allocations for three facilities:\n", + " * 100063 – Southeast Resource Recovery Facility (SERRF)\n", + " * 100064 – LACSD - Commerce Refuse To Energy\n", + " * 101264 – Covanta - Stanislaus, Inc\n", + "* Related: § 95852(k): \"Limited Exemption of Emissions for Waste-to-Energy Facilities\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: LNG suppliers\n", + "* full name: \"Suppliers of Liquefied Natural Gas and Compressed Natural Gas\"\n", + "* § 95852(l)(1), \"Limited Exemption for Emissions from LNG Suppliers,\" describes exemption\n", + "* This category had an allocation in 2018, for emissions with compliance obligations in second compliance period (2015-2017). 
This category is given the limited exemption for emissions in years from 2018 onward.\n", + "* Note that regulations specify that there could be a true-up of the allocation, from vintage 2019 (§ 95852(l)(1))\n", + "* According to 2016 ISOR, will not have any compliance obligation for years 2018 and after, so no more allocations for this category after 2017 (except for true-up noted above).\n", + " * NZ INTERNAL: see https://trello.com/c/wPnfBr2B) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "#### CA allocation: industrial & other" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "## Quebec: inputs\n", + "\n", + "* get amount of newly available current allowances for Canadian provinces\n", + "* they don't have consignment, so this is only state-owned allowances being introduced for the first time\n", + "* (reminder: this category does not include unsold adv reintro as cur)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "**Quebec regulations:**\n", + "\n", + "full version: http://legisquebec.gouv.qc.ca/en/ShowDoc/cr/Q-2,%20r.%2046.1\n", + "\n", + "cap amounts (to 2020): \n", + "* from Quebec Environment Quality Act (chapter Q-2), r. 15.2\n", + "* http://legisquebec.gouv.qc.ca/en/ShowDoc/cr/Q-2,%20r.%2015.2\n", + "* Published Gazette Officielle du Québec, December 19, 2012, Vol. 144, No. 51 (http://www2.publicationsduquebec.gouv.qc.ca/dynamicSearch/telecharge.php?type=1&file=2389.PDF)\n", + "\n", + "cap amounts (2021-2030):\n", + "* from Quebec Environment Quality Act (chapter Q-2), r. 15.3\n", + "* http://legisquebec.gouv.qc.ca/en/ShowDoc/cr/Q-2,%20r.%2015.3\n", + "* Published in Gazette Officielle du Québec, August 31, 2017, Vol. 149, No. 
35A (http://www2.publicationsduquebec.gouv.qc.ca/dynamicSearch/telecharge.php?type=1&file=103120.pdf)\n", + "\n", + "advance auction amounts: \n", + "* amounts to be made available for advance auction do not seem to be specified in regulations\n", + " * NZ INTERNAL: see https://trello.com/c/3btJGiM7\n", + "* in practice, amounts made available in advance auction have been exactly 10% of annual caps\n", + "* WCI annual auction notice for 2018 states: \"Advance Auction Allowances Offered for Sale: The Advance Auction budget represents 10 percent of the allowances from each of the jurisdiction’s allowance budgets that are created for the year three years subsequent to the current calendar year.\"\n", + "\n", + "reserve amounts: \n", + "* from Quebec Environment Quality Act (chapter Q-2), r. 46.1, s. 38\n", + "* http://legisquebec.gouv.qc.ca/en/showversion/cr/Q-2,%20r.%2046.1?code=se:38&pointInTime=20180119\n", + "\n", + "allocations (variable):\n", + "* from Quebec Environment Quality Act (chapter Q-2), r. 46.1, s. 39.\n", + " * http://legisquebec.gouv.qc.ca/en/showversion/cr/Q-2,%20r.%2046.1?code=se:39&pointInTime=20180119\n", + "* from Quebec Environment Quality Act (chapter Q-2), r. 46.1, s. 
41.\n", + " * http://legisquebec.gouv.qc.ca/en/ShowDoc/cr/Q-2,%20r.%2046.1\n", + "\n", + "remainder (cap - advance - reserve - allocation) goes to current auction as state-owned allowances\n", + "* based on historical practice" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def get_QC_inputs():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # get record of retirements (by vintage) from compliance reports\n", + " QC_cap_data = pd.read_excel(prmt.input_file, sheet_name='QC cap data')\n", + "\n", + " # get cap amounts from input file\n", + " QC_cap = QC_cap_data[QC_cap_data['name']=='QC_cap'].set_index('year')['data']\n", + " QC_cap.name = 'QC_cap'\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # calculate advance for each year (2013-2030) on the basis that advance is 10% of cap\n", + " # annual auction notice 2018 says: \n", + " # \"Advance Auction Allowances Offered for Sale:\n", + " # The Advance Auction budget represents 10 percent of the allowances from each of the jurisdiction’s allowance \n", + " # budgets that are created for the year three years subsequent to the current calendar year.\"\n", + " QC_advance_fraction = QC_cap_data[QC_cap_data['name']=='QC_advance_fraction'].set_index('year')['data']\n", + " QC_advance = QC_cap * QC_advance_fraction\n", + " QC_advance.name = 'QC_advance'\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # calculate reserve quantities, using reserve fraction in input file, multiplied by cap amounts\n", + " QC_APCR_fraction = QC_cap_data[QC_cap_data['name']=='QC_APCR_fraction'].set_index('year')['data']\n", + " QC_APCR = QC_cap * QC_APCR_fraction\n", + " QC_APCR.name = 'QC_APCR'\n", + "\n", + " # new regulations for QC:\n", + " # assume that QC will *not* increase its APCR, as ARB informally suggested it would for post-2020\n", + " \n", + " return(QC_cap, QC_advance, QC_APCR)" + ] + }, + 
def get_CIR_data_and_clean():
    """
    Read every quarterly sheet of the CIR workbook (prmt.CIR_excel) and split
    each into allowance rows and offset rows, then clean and combine them.

    Sets object attributes (no return value):
      * prmt.CIR_historical: cleaned allowances + offsets, concatenated
      * prmt.CIR_offsets_q_sums: offsets summed over categories, indexed by date
    """
    # code here is for files that have the latest sheets covering jurisdictions CA, QC, ON;
    # if using earlier file as input, need to change jurisdictions in file name

    logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)")

    CIR_sheet_names = pd.ExcelFile(prmt.CIR_excel).sheet_names

    # initialize lists; collect one df per quarterly sheet
    CIR_allowances_list = []
    CIR_offsets_list = []

    for sheet in CIR_sheet_names:
        # get records for each quarter; data starts below a 6-row header
        one_quart = pd.read_excel(prmt.CIR_excel, header=6, sheet_name=sheet)

        # record sheet name in a column of the df
        one_quart['quarter'] = sheet

        # look in first column ('Vintage'), find the rows labeled
        # 'Allowances Subtotal' and 'Offset Credits Subtotal';
        # [0] at the end takes the 0th match, which is an integer position
        first_col_as_list = one_quart['Vintage'].astype(str).tolist()
        allow_subtot_index = [i for i, s in enumerate(first_col_as_list) if 'Allowances Subtotal' in s][0]
        offset_subtot_index = [i for i, s in enumerate(first_col_as_list) if 'Offset Credits Subtotal' in s][0]

        # get allowances: rows above the subtotal (-1 cuts off 'Allowances Subtotal')
        one_quart_allow = one_quart.loc[0:allow_subtot_index-1]
        # get offsets: rows between the two subtotal rows
        # (-1 cuts off 'Offset Credits Subtotal')
        one_quart_offset = one_quart.loc[allow_subtot_index+1:offset_subtot_index-1]

        CIR_allowances_list += [one_quart_allow]
        CIR_offsets_list += [one_quart_offset]
    # end of loop "for sheet in CIR_sheet_names:"
    # (Forest Buffer amounts in the sheet footnotes are intentionally not parsed here;
    # see "CA-QC-ON quarterly compliance instrument report - wrangling data 2018-03-06.ipynb")

    # convert lists of dfs above into single dfs
    CIR_allowances = pd.concat(CIR_allowances_list, axis=0, sort=True)
    CIR_offsets = pd.concat(CIR_offsets_list, axis=0, sort=True)

    # call functions to clean up allowances and offsets
    CIR_allowances = clean_CIR_allowances(CIR_allowances)
    CIR_offsets = clean_CIR_offsets(CIR_offsets)

    # combine cleaned versions of allowances and offsets into CIR_historical (new df);
    # note this does not include Forest Buffer
    prmt.CIR_historical = pd.concat([CIR_allowances, CIR_offsets], sort=True)

    # create CIR_offsets_q_sums, used later for CIR comparison:
    # these are sums across the different categories of offsets,
    # but retain the full set of various accounts,
    # & showing offsets in private vs. jurisdiction accounts
    df = CIR_offsets.copy().reset_index()
    df = df.drop(['Description', 'Vintage'], axis=1)
    df = df.set_index('date')
    prmt.CIR_offsets_q_sums = df

    # no return; func sets object attributes prmt.CIR_historical & prmt.CIR_offsets_q_sums
def clean_CIR_allowances(CIR_allowances):
    """
    Clean raw allowance rows extracted from the CIR sheets.

    Parameters:
        CIR_allowances: df of allowance rows concatenated across quarterly
        sheets, with columns 'Vintage', 'quarter', 'Total', plus account
        columns whose names vary across quarters.

    Returns a df indexed by (date, Description, Vintage), in units of MMTCO2e.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    CIR_allowances = CIR_allowances.reset_index(drop=True)

    # sheet headers contain embedded newlines; strip them out of column names
    CIR_allowances.columns = CIR_allowances.columns.str.replace('\n', '')

    # combine and clean up columns with changing names across quarters:
    # sum every column whose name contains col_type into one canonical column,
    # then drop the variant columns (those with footnote markers '*' or parens)
    for col_type in ['Retirement',
                     'Voluntary Renewable Electricity',
                     'Limited Use Holding Account',
                     'Environmental Integrity']:

        CIR_sel_cols = [col for col in CIR_allowances.columns if col_type in col]
        CIR_allowances[col_type] = CIR_allowances[CIR_sel_cols].sum(axis=1, skipna=True)

        for col in CIR_sel_cols:
            if '*' in col or '(' in col:
                CIR_allowances = CIR_allowances.drop(col, axis=1)

    # 'Description' starts as a copy of 'Vintage'; any vintage year 2013-2030
    # is relabeled 'vintaged allowances'
    CIR_allowances.insert(0, 'Description', CIR_allowances['Vintage'])
    for item in range(2013, 2030+1):
        CIR_allowances['Description'] = CIR_allowances['Description'].replace(item, 'vintaged allowances', regex=True)

    # non-vintaged rows get short labels; the subtotal row becomes NaN
    non_vintage_map = {'Non-Vintage Québec Early Action Allowances (QC)': 'early_action',
                       'Non-Vintage Price Containment Reserve Allowances': 'APCR',
                       'Allowances Subtotal': np.NaN}
    CIR_allowances['Vintage'] = CIR_allowances['Vintage'].replace(non_vintage_map)

    # sheet names like '2014 Q4' -> '2014Q4' -> quarterly Period
    CIR_allowances['quarter'] = CIR_allowances['quarter'].str.replace(' ', '')
    CIR_allowances['quarter'] = pd.to_datetime(CIR_allowances['quarter']).dt.to_period('Q')
    CIR_allowances = CIR_allowances.rename(columns={'quarter': 'date', 'Total': 'subtotal'})

    CIR_allowances = CIR_allowances.set_index(['date', 'Description', 'Vintage'])

    # convert units to MMTCO2e
    CIR_allowances = CIR_allowances/1e6

    return(CIR_allowances)
def clean_CIR_offsets(CIR_offsets):
    """
    Clean raw offset rows extracted from the CIR sheets and sum them by quarter.

    Parameters:
        CIR_offsets: df of offset rows concatenated across quarterly sheets,
        with columns 'Vintage' (offset type / jurisdiction header labels),
        'quarter', 'General', 'Total', plus account columns whose names vary
        across quarters. Jurisdiction header rows are labeled 'California' /
        'Québec'; offset-type rows follow their jurisdiction header.

    Returns a df indexed by (date, Description='offsets', Vintage='n/a'),
    with quantities summed over offset types and jurisdictions, in MMTCO2e.
    """
    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    CIR_offsets = CIR_offsets.reset_index(drop=True)

    # sheet headers contain embedded newlines; strip them out of column names
    CIR_offsets.columns = CIR_offsets.columns.str.replace('\n', '')

    CIR_offsets = CIR_offsets.rename(columns={'Vintage': 'Offset type'})

    # strip trailing whitespace and footnote markers ('+', '*') from labels;
    # final rstrip() removes whitespace exposed after a marker is removed
    CIR_offsets['Offset type'] = CIR_offsets['Offset type'].str.rstrip().str.rstrip('+*').str.rstrip()

    # offset-type names = unique labels minus jurisdiction headers & NaN
    CIR_offsets_names = CIR_offsets['Offset type'].unique().tolist()
    CIR_offsets_names.remove('California')
    CIR_offsets_names.remove('Québec')
    CIR_offsets_names.remove(np.NaN)

    # jurisdiction headers forward-fill down to the offset-type rows below them
    CIR_offsets['Jurisdiction'] = CIR_offsets['Offset type']
    CIR_offsets = CIR_offsets.dropna(subset=['Jurisdiction'])

    for row in CIR_offsets.index:
        if CIR_offsets.at[row, 'Jurisdiction'] in CIR_offsets_names:
            CIR_offsets.at[row, 'Jurisdiction'] = np.NaN
    # .ffill() replaces deprecated fillna(method='ffill')
    CIR_offsets['Jurisdiction'] = CIR_offsets['Jurisdiction'].ffill()

    # keep only the offset-type rows (drop jurisdiction header rows)
    CIR_offsets = CIR_offsets[CIR_offsets['Offset type'].isin(CIR_offsets_names)]

    for col in ['General', 'Total']:
        CIR_offsets[col] = CIR_offsets[col].astype(str)
        # regex=True made explicit (pandas changed the default); r'\+' strips footnote '+'
        CIR_offsets[col] = CIR_offsets[col].str.replace(r'\+', '', regex=True)
        # repair a known garbled cell (two numbers fused into one cell)
        CIR_offsets[col] = CIR_offsets[col].str.replace('5,043,925 5,017,043', '5017043', regex=False)
        CIR_offsets[col] = CIR_offsets[col].astype(float)

    # remove jurisdiction suffixes, e.g. 'X (CA)' -> 'X'.
    # (was: .str.rstrip(' (CA)').str.rstrip(' (QC)') — rstrip treats the argument
    # as a *set of characters*, so it could overstrip names ending in A/C/Q/parens)
    CIR_offsets['Offset type'] = CIR_offsets['Offset type'].str.replace(r'\s*\((CA|QC)\)$', '', regex=True)

    # sheet names like '2014 Q4' -> '2014Q4' -> quarterly Period
    CIR_offsets['quarter'] = CIR_offsets['quarter'].str.replace(' ', '', regex=False)
    CIR_offsets['quarter'] = pd.to_datetime(CIR_offsets['quarter']).dt.to_period('Q')
    CIR_offsets = CIR_offsets.rename(columns={'quarter': 'date'})

    # combine and clean up columns with changing names across quarters:
    # sum every column whose name contains col_type into one canonical column,
    # then drop the variant columns (those with footnote markers '*' or parens)
    for col_type in ['Retirement',
                     'Voluntary Renewable Electricity',
                     'Limited Use Holding Account',
                     'Environmental Integrity']:

        CIR_sel_cols = [col for col in CIR_offsets.columns if col_type in col]
        CIR_offsets[col_type] = CIR_offsets[CIR_sel_cols].sum(axis=1, skipna=True)

        for col in CIR_sel_cols:
            if '*' in col or '(' in col:
                CIR_offsets = CIR_offsets.drop(col, axis=1)

    CIR_offsets = CIR_offsets.rename(columns={'Offset type': 'Description',
                                              'Total': 'subtotal'})
    CIR_offsets = CIR_offsets.set_index(['date', 'Description', 'Jurisdiction'])

    # convert units to MMTCO2e
    CIR_offsets = CIR_offsets/1e6

    # sum over types of offsets, jurisdictions
    CIR_offsets = CIR_offsets.groupby('date').sum()

    CIR_offsets['Description'] = 'offsets'
    CIR_offsets['Vintage'] = 'n/a'

    CIR_offsets = CIR_offsets.set_index([CIR_offsets.index, 'Description', 'Vintage'])

    return(CIR_offsets)
def retire_for_EIM_outstanding(all_accts):
    """
    For CA, moves allowances to Retirement account, to account for emissions
    not counted in Energy Imbalance Market (EIM).

    Under current regs (Oct 2017), § 95852(b)(1)(D):
    "EIM Outstanding Emissions. Beginning January 1, 2018, ARB will retire current
    vintage allowances designated by ARB for auction pursuant to section 95911(f)(3)
    that remain unsold in the Auction Holding Account for more than 24 months in the
    amount of EIM Outstanding Emissions as defined in section 95111(h) of MRR."

    It seems that under current regs, if the Auction Holding Account no longer has
    CA allowances that have been there for more than 24 months, then there is no
    stipulation for removing allowances from other pools.

    Proposed new regs (Sep 2018), 95911(h)(2): Starting in 2019, 2018 + Q1 2019 EIM
    Outstanding Emissions will be retired from the budget year two years after the
    current budget year (retired by 2019Q4 and 2020Q4, from vintage 2021 and 2022,
    respectively). § 95852(b)(1)(D): Starting in Q2 2019, EIM Purchasers will have
    compliance obligations, "calculated pursuant to MRR section 95111(h)(3)."
    For EIM Outstanding Emissions beyond what carries a compliance obligation,
    § 95852(b)(1)(E): up to Mar 31, 2019, "calculated pursuant to MRR section
    95111(h)(1)"; starting Apr 1, 2019, per MRR section 95111(h)(2).

    CARB's "Post-2020 Caps" proposal suggested these retirements "could be several
    million allowances a year from 2018 through 2020" and cumulatively "tens of
    millions of allowances."

    Returns all_accts with any EIM retirements moved to the retirement account.
    """

    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # pre-test for conservation
    all_accts_sum_init = all_accts['quant'].sum()

    # cut-off date: if allowances with unsold_di of this date or earlier remain
    # unsold, they are eligible to be retired for EIM.
    # this function runs after current auction in cq.date, so any remaining unsold
    # for 2 years at the time the function runs will still be unsold at the start
    # of the next quarter, at which point they'll be unsold > 2 years
    cut_off_date = quarter_period(f"{cq.date.year-2}Q{cq.date.quarter}")

    if cq.date.year in [2018, 2019, 2020]:
        if prmt.CA_post_2020_regs == 'Regs_Oct_2017':
            # EIM outstanding processed using Oct 2017 version of regulations:
            # draw EIM retirements from unsold CA state-owned current allowances
            # in auct_hold, with unsold_di > 2 years (24 mo.) earlier
            mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'
            mask2 = all_accts.index.get_level_values('inst_cat')=='CA'
            mask3 = all_accts.index.get_level_values('auct_type')=='current'
            mask4 = all_accts.index.get_level_values('status')=='unsold'
            mask5 = all_accts.index.get_level_values('unsold_di') <= cut_off_date # see note below
            mask6 = all_accts['quant'] > 0
            mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)

            # note: for applying cut_off_date, should be sufficient to use
            # unsold_di==cut_off_date, but used <= to be safe

            # unsold to potentially retire for EIM
            retire_potential = all_accts.copy().loc[mask]

        elif prmt.CA_post_2020_regs in ['Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018']:
            if cq.date.year == 2018:
                # 2018 is still processed using Oct 2017 version of regulations:
                # draw EIM retirements from unsold CA state-owned current allowances
                # in auct_hold, with unsold_di > 2 years (24 mo.) earlier
                mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'
                mask2 = all_accts.index.get_level_values('inst_cat')=='CA'
                mask3 = all_accts.index.get_level_values('auct_type')=='current'
                mask4 = all_accts.index.get_level_values('status')=='unsold'
                mask5 = all_accts.index.get_level_values('unsold_di') <= cut_off_date # see note below
                mask6 = all_accts['quant'] > 0
                mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)

                # note: for applying cut_off_date, should be sufficient to use
                # unsold_di==cut_off_date, but used <= to be safe

                # unsold to potentially retire for EIM
                retire_potential = all_accts.copy().loc[mask]

            elif cq.date.year in [2019, 2020]:
                # EIM outstanding processed using (proposed) Sep 2018 version of regulations:
                # draw EIM retirements from cap allowances in alloc_hold;
                # assume this would come from vintage == cq.date.year + 2
                # (because cq.date.year allowances already distributed to various
                # purposes by the time EIM processed)
                mask1 = all_accts.index.get_level_values('acct_name') == 'alloc_hold'
                mask2 = all_accts.index.get_level_values('juris') == 'CA'
                mask3 = all_accts.index.get_level_values('inst_cat') == 'cap'
                mask4 = all_accts.index.get_level_values('vintage') == cq.date.year + 2
                mask5 = all_accts['quant'] > 0
                mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)

                # cap to potentially retire for EIM
                retire_potential = all_accts.copy().loc[mask]

            else:
                # unreachable: outer if restricts cq.date.year to 2018-2020
                pass

        # NOTE(review): if prmt.CA_post_2020_regs holds any other value, mask and
        # retire_potential are never bound and the code below would raise NameError —
        # presumably the setting is validated upstream; confirm.

        # then process retirements, using mask and retire_potential set above

        # get quantity to be retired in cq.date.year;
        # EIM_remaining is the initialization of a variable that will be updated
        EIM_retirements = assign_EIM_retirements()

        EIM_remaining = EIM_retirements.at[cq.date.year]

        # create df for adding transfers; copy of retire_potential, but with values zeroed out
        # sort_index to ensure earliest vintages are drawn from first
        retire_potential = retire_potential.sort_index()
        to_retire = retire_potential.copy()
        to_retire['quant'] = float(0)

        for row in retire_potential.index:
            potential_row_quant = retire_potential.at[row, 'quant']
            to_retire_quant = min(potential_row_quant, EIM_remaining)

            # update un-accumulator for jurisdiction
            EIM_remaining += -1 * to_retire_quant

            # update retire_potential
            retire_potential.at[row, 'quant'] = potential_row_quant - to_retire_quant

            # update to_retire
            to_retire.at[row, 'quant'] = to_retire_quant

        # what remains in retire_potential is not retired; to be concat with other pieces below

        mapping_dict = {'acct_name': 'retirement',
                        'inst_cat': 'EIM_retire',
                        'date_level': cq.date}
        to_retire = multiindex_change(to_retire, mapping_dict)

        # concat to_retire with all_accts remainder
        all_accts = pd.concat([all_accts.loc[~mask], retire_potential, to_retire], sort=True)

        logging.info(f"in {cq.date}: retired {to_retire['quant'].sum()} M for EIM Outstanding Emissions")


    else:
        # end of "if cq.date.year in [2018, 2019, 2020]:"
        # no EIM retirements in other years
        pass

    if prmt.run_tests == True:
        name_of_allowances = 'EIM retirement'
        test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return(all_accts)
def retire_for_bankruptcy(all_accts):
    """
    Retire allowances to cover outstanding compliance obligations due to bankruptcy.

    No bankruptcy retirements under current regs (Oct 2017).

    In proposed new regs (Sep 2018), bankruptcy retirements added in 95911(h)(1):
    Starting in 2019, allowances will be retired to account for outstanding
    compliance obligations due to bankruptcy, from the budget two years after
    the current budget year.

    Returns all_accts with any bankruptcy retirements moved to the retirement account.
    """

    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    bankruptcy_retirements = assign_bankruptcy_retirements()

    if prmt.CA_post_2020_regs in ['Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018']:
        if cq.date.year in bankruptcy_retirements.index.tolist():
            # bankruptcy retirements come from annual budgets.
            # If processed in Q4 of each year, would have to come out of annual
            # budget for following year; so bankruptcy retirement in 2019Q4 would
            # come from 2020 annual budget

            # get alloc_hold for vintage == cq.date.year + 2
            mask1 = all_accts.index.get_level_values('acct_name') == 'alloc_hold'
            mask2 = all_accts.index.get_level_values('vintage') == cq.date.year + 2
            mask = (mask1) & (mask2)

            # to avoid error "A value is trying to be set on a copy of a slice from a DataFrame."...
            # ... use .copy() when creating slice alloc_hold_yr_plus_2
            # ... & use .copy() in creating to_retire from alloc_hold_yr_plus_2
            alloc_hold_yr_plus_2 = all_accts.copy().loc[mask]
            remainder = all_accts.loc[~mask]

            # run only if df has length = 1 (a single alloc_hold row for that vintage)
            if len(alloc_hold_yr_plus_2) == 1:

                # repeat slice of all_accts, to get df to modify for retirement;
                # set value equal to quantity specified in Series bankruptcy_retirements
                to_retire = alloc_hold_yr_plus_2.copy()
                to_retire.at[to_retire.index, 'quant'] = bankruptcy_retirements.at[cq.date.year]
                mapping_dict = {'acct_name': 'retirement',
                                'inst_cat': 'bankruptcy',
                                'date_level': cq.date, }
                to_retire = multiindex_change(to_retire, mapping_dict)

                # update alloc_hold to have quantity remaining after retirement
                alloc_hold_yr_plus_2_original = alloc_hold_yr_plus_2['quant'].sum()
                alloc_hold_yr_plus_2_new = alloc_hold_yr_plus_2_original - bankruptcy_retirements.at[cq.date.year]
                alloc_hold_yr_plus_2.at[alloc_hold_yr_plus_2.index, 'quant'] = alloc_hold_yr_plus_2_new

                # recombine dfs:
                all_accts = pd.concat([remainder, alloc_hold_yr_plus_2, to_retire])

            else:
                print("Error" + "! alloc_hold_yr_plus_2 was not a single row; here's the df:")
                print(alloc_hold_yr_plus_2)
        else: # cq.date.year not in bankruptcy_retirements.index.tolist()
            # no other known planned retirements for bankruptcies;
            # no projection for future bankruptcies
            pass
    else:
        # current regs (Oct 2017) don't include bankruptcy
        pass

    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return(all_accts)
def transfer_unsold__from_auct_hold_to_APCR(all_accts):
    """
    For CA, transfers unsold stock to APCR if they have gone unsold for more
    than 24 months, as long as they haven't already been retired for EIM removal.

    Based on CA regs (QC & ON have no roll over rule):
    [CA regs Oct 2017], § 95911(g)
    """

    logging.info(f"{inspect.currentframe().f_code.co_name} (start)")

    # remember starting total so conservation can be checked afterward
    total_before = all_accts['quant'].sum()

    # roll-over cut-off: anything with unsold_di on or before this date has been
    # unsold more than 24 months. (This function runs after the current auction
    # in cq.date, so allowances unsold for 2 years now will still be unsold at
    # the start of next quarter, at which point they're unsold > 2 years.)
    roll_over_cutoff = quarter_period(f"{cq.date.year-2}Q{cq.date.quarter}")

    idx = all_accts.index
    eligible = (
        (idx.get_level_values('acct_name') == 'auct_hold')
        & (idx.get_level_values('inst_cat') == 'CA')
        & (idx.get_level_values('auct_type') == 'current')
        & (idx.get_level_values('status') == 'unsold')
        & (idx.get_level_values('unsold_di') <= roll_over_cutoff)
        & (all_accts['quant'] > 0)
    )
    # note: unsold_di == roll_over_cutoff should be sufficient; <= used to be safe

    # relabel the eligible unsold rows as APCR reserve holdings
    rolled = all_accts.loc[eligible]
    rolled = multiindex_change(rolled, {'acct_name': 'APCR_acct',
                                        'auct_type': 'reserve'})
    unsold_to_transfer = rolled.copy()

    # recombine the transferred rows with everything else
    all_accts = pd.concat([all_accts.loc[~eligible], unsold_to_transfer], sort=True)

    if prmt.run_tests == True:
        name_of_allowances = 'unsold transfer to APCR'
        test_conservation_during_transfer(all_accts, total_before, name_of_allowances)
        parent_fn = str(inspect.currentframe().f_code.co_name)
        test_conservation_simple(all_accts, total_before, parent_fn)
        test_for_duplicated_indices(all_accts, parent_fn)
        test_for_negative_values(all_accts, parent_fn)

    logging.info(f"in {cq.date}: transferred {unsold_to_transfer['quant'].sum()} M unsold to APCR")
    logging.info(f"{inspect.currentframe().f_code.co_name} (end)")

    return all_accts
sell out).\n", + " \n", + " Order of sales based on regs:\n", + " QC: _______ \n", + " \n", + " Notes: Once it is confirmed to be working properly, this could be simplified by:\n", + " 1. not re-doing filtering from scratch each batch of allowances\n", + " 2. finishing new fn avail_to_sold_all_accts, to avoid repetitive code\n", + " \n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: check that all available allowances are in auct_hold\n", + " avail_for_test = all_accts.loc[all_accts.index.get_level_values('status')=='available']\n", + " avail_for_test_accts = avail_for_test.index.get_level_values('acct_name').unique().tolist()\n", + " if avail_for_test.empty == False:\n", + " if avail_for_test_accts != ['auct_hold']:\n", + " print(f\"{prmt.test_failed_msg} Some available allowances were in an account other than auct_hold. Here's available:\")\n", + " print(avail_for_test)\n", + " else: # avail_for_test_accts == ['auct_hold']\n", + " pass\n", + " else: # avail_for_test.empty == True\n", + " print(\"Warning\" + \"! 
avail_for_test is empty.\")\n", + " # END OF TEST\n", + " \n", + " # get sales % for current auctions, for this juris, for cq.date\n", + " # (works for auctions whether linked or unlinked, i.e., QC-only and CA-QC)\n", + " df = prmt.auction_sales_pcts_all.copy()\n", + " df = df.loc[(df.index.get_level_values('market').str.contains('QC')) &\n", + " (df.index.get_level_values('auct_type')=='current')]\n", + " df.index = df.index.droplevel(['market', 'auct_type'])\n", + " sales_fract_cur_1j_1q = df.at[cq.date]\n", + " \n", + " # get current available allowances\n", + " # (it should be that all available allowances are in auct_hold)\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='QC'\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " cur_avail_QC_1q = all_accts.loc[mask]\n", + " \n", + " not_cur_avail_QC_1q = all_accts.loc[~mask]\n", + " \n", + " if sales_fract_cur_1j_1q == 1.0:\n", + " # all available allowances are sold and transferred into gen_acct\n", + " cur_sold_QC_1q = cur_avail_QC_1q\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " cur_sold_QC_1q = multiindex_change(cur_sold_QC_1q, mapping_dict)\n", + " \n", + " # recombine\n", + " all_accts = pd.concat([cur_sold_QC_1q, not_cur_avail_QC_1q])\n", + " \n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'after QC sell-out'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " else: # 
sales_fract_cur_1j_1q != 1.0:\n", + " # calculate quantity of QC allowances sold (and test that variable is a float)\n", + " cur_sold_1q_tot_QC = cur_avail_QC_1q['quant'].sum() * sales_fract_cur_1j_1q\n", + " test_if_value_is_float_or_np_float64(cur_sold_1q_tot_QC)\n", + "\n", + " # remaining: un-accumulator for all QC sales; initialize here; will be updated repeatedly below\n", + " cur_remaining_to_sell_1q_QC = cur_sold_1q_tot_QC.copy()\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # sales priority: for QC, reintro are first\n", + "\n", + " # extract reintro allowances from all_accts\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='QC'\n", + " mask5 = all_accts.index.get_level_values('newness')=='reintro'\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " reintro_avail_1q = all_accts.loc[mask]\n", + " not_reintro_avail_1q = all_accts.loc[~mask]\n", + "\n", + " # iterate through all rows for available allowances; remove those sold\n", + " # (code adapted from avail_to_sold)\n", + " \n", + " # start by creating df from avail, with values zeroed out\n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " reintro_avail_1q = reintro_avail_1q.sort_index()\n", + " \n", + " reintro_sold_1q = reintro_avail_1q.copy()\n", + " reintro_sold_1q['quant'] = float(0)\n", + " \n", + " for row in reintro_avail_1q.index:\n", + " in_stock_row = reintro_avail_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_QC)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " cur_remaining_to_sell_1q_QC = cur_remaining_to_sell_1q_QC - 
sold_from_row_quantity\n", + "\n", + " # update sold quantity & metadata\n", + " reintro_sold_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update reintro_avail_1q quantity (but not metadata)\n", + " reintro_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # sold_from_row_quantity <= 1e-7:\n", + " pass\n", + "\n", + "\n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " reintro_sold_1q = multiindex_change(reintro_sold_1q, mapping_dict)\n", + "\n", + " # for unsold, metadata is updated for all allowance types at once, at end of this function\n", + " # unsold is what's left in avail df\n", + " reintro_unsold_1q = reintro_avail_1q\n", + "\n", + " # recombine\n", + " all_accts = pd.concat([reintro_sold_1q,\n", + " reintro_unsold_1q,\n", + " not_reintro_avail_1q], sort=False)\n", + " \n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + "\n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'after reintro sold'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # sales priority: state-owned allowances available for first time as current (including fka adv, if there are any)\n", + "\n", + " # extract state allowances new to current auctions (from all_accts)\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = 
all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='QC'\n", + " mask5 = all_accts.index.get_level_values('newness')=='new'\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " new_avail_1q = all_accts.loc[mask]\n", + " not_new_avail_1q = all_accts.loc[~mask]\n", + "\n", + " # iterate through all rows for available allowances; remove those sold\n", + " # (code adapted from avail_to_sold)\n", + " \n", + " # start by creating df from avail, with values zeroed out\n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " new_avail_1q = new_avail_1q.sort_index()\n", + " \n", + " new_sold_1q = new_avail_1q.copy()\n", + " new_sold_1q['quant'] = float(0)\n", + " \n", + " for row in new_avail_1q.index:\n", + " in_stock_row = new_avail_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_QC)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " cur_remaining_to_sell_1q_QC = cur_remaining_to_sell_1q_QC - sold_from_row_quantity\n", + "\n", + " # update sold quantity & metadata\n", + " new_sold_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update new_avail_1q quantity (but not metadata)\n", + " new_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # sold_from_row_quantity <= 1e-7:\n", + " pass\n", + "\n", + " # using all_accts:\n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " new_sold_1q = multiindex_change(new_sold_1q, mapping_dict)\n", + " \n", + " # for unsold, metadata is updated for all allowance types at once, at end of this function\n", + " # unsold is what's left in avail df\n", + " new_unsold_1q = 
new_avail_1q\n", + "\n", + " # recombine & groupby sum\n", + " all_accts = pd.concat([new_sold_1q,\n", + " new_unsold_1q,\n", + " not_new_avail_1q], \n", + " sort=False)\n", + " # all_accts = all_accts.groupby(prmt.standard_MI_names).sum()\n", + " \n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + "\n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'after newly available sold'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # update status for all unsold\n", + " all_accts = unsold_update_status(all_accts)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'after update sold'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " # filter out rows with zero or fractional allowances (or NaN)\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)].dropna()\n", + " all_accts = all_accts.dropna()\n", + "\n", + " # end of if-else statement that began \"if sales_fract_cur_1j_1q == 1.0)\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, 
parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)\n", + "# end of process_auction_cur_QC_all_accts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create current quarter (cq) as object (instance of class Cq)\n", + "class Cq:\n", + " def __init__(self, date):\n", + " self.date = date\n", + " \n", + " def step_to_next_quarter(self):\n", + " if self.date < prmt.model_end_date:\n", + " self.date = (pd.to_datetime(f'{self.date.year}Q{self.date.quarter}') + DateOffset(months=3)).to_period('Q')\n", + " else:\n", + " pass\n", + " \n", + "# create new object qc\n", + "cq = Cq(quarter_period('2012Q4'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# update values in object prmt using functions\n", + "\n", + "prmt.CA_cap = initialize_CA_cap()\n", + "prmt.CA_APCR_MI = initialize_CA_APCR()\n", + "prmt.CA_advance_MI = initialize_CA_advance()\n", + "prmt.VRE_reserve_MI = initialize_VRE_reserve()\n", + "\n", + "# get input: historical quarterly auction data\n", + "get_qauct_hist()\n", + "\n", + "# get input: historical + projected quarterly auction data\n", + "# sets object attribute prmt.auction_sales_pcts_all\n", + "get_auction_sales_pcts_all()\n", + " \n", + "# set qauct_new_avail; sets object attribute prmt.qauct_new_avail\n", + "create_qauct_new_avail()\n", + "\n", + "# set compliance_events; sets object attribute prmt.compliance_events\n", + "get_compliance_events()\n", + "\n", + "# ~~~~~~~~~~~~\n", + "\n", + "# sets object attributes prmt.CIR_historical & prmt.CIR_offsets_q_sums\n", + "get_CIR_data_and_clean()\n", + "\n", + "# get historical data for VRE; assume no more retirements\n", + "# sets object attribute prmt.VRE_retired\n", + "get_VRE_retired_from_CIR()\n", + "\n", + "# 
~~~~~~~~~~~~\n", + "\n", + "# initialization of allocations\n", + "CA_alloc_data = read_CA_alloc_data()\n", + "elec_alloc_IOU, elec_alloc_POU = initialize_elec_alloc()\n", + "nat_gas_alloc = initialize_nat_gas_alloc(CA_alloc_data)\n", + "industrial_etc_alloc = initialize_industrial_etc_alloc(CA_alloc_data)\n", + "\n", + "# initialization of consignment vs. non-consignment\n", + "# run fn create_consign_historical_and_projection_annual (many returns)\n", + "consign_ann, consign_elec_IOU, consign_nat_gas, consign_elec_POU, nat_gas_not_consign, elec_POU_not_consign = create_consign_historical_and_projection_annual(\n", + " elec_alloc_IOU, elec_alloc_POU, nat_gas_alloc)\n", + "\n", + "# upsample consignment; sets object attribute prmt.consign_hist_proj\n", + "consign_upsample_historical_and_projection(consign_ann)\n", + "\n", + "# convert all allocations into MI (for all vintages) & put into one df; set as object attribute prmt.CA_alloc_MI_all\n", + "CA_alloc_consign_dfs = [consign_elec_IOU, consign_elec_POU, consign_nat_gas]\n", + "CA_alloc_dfs_not_consign = [industrial_etc_alloc, elec_POU_not_consign, nat_gas_not_consign]\n", + "CA_alloc_dfs = CA_alloc_consign_dfs + CA_alloc_dfs_not_consign\n", + "CA_alloc_MI_list = []\n", + "for alloc in CA_alloc_dfs:\n", + " alloc_MI = convert_ser_to_df_MI_CA_alloc(alloc)\n", + " CA_alloc_MI_list += [alloc_MI]\n", + "prmt.CA_alloc_MI_all = pd.concat(CA_alloc_MI_list)\n", + "\n", + "# ~~~~~~~~~~~~\n", + "\n", + "prmt.QC_cap, QC_advance, QC_APCR = get_QC_inputs()\n", + "prmt.QC_APCR_MI = convert_ser_to_df_MI(QC_APCR)\n", + "prmt.QC_advance_MI = convert_ser_to_df_MI(QC_advance)\n", + "\n", + "get_QC_allocation_data()\n", + "# sets object attributes prmt.QC_alloc_initial, prmt.QC_alloc_trueups, prmt.QC_alloc_full_proj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DEFINE CLASSES, CREATE OBJECTS\n", + "\n", + "# ~~~~~~~~~~~\n", + "# initialization\n", + "class 
Scenario_juris:\n", + " def __init__(self, \n", + " avail_accum, \n", + " cur_sell_out_counter, \n", + " reintro_eligibility, \n", + " snaps_CIR, \n", + " snaps_end):\n", + " self.avail_accum = avail_accum # initialize as empty\n", + " self.cur_sell_out_counter = cur_sell_out_counter # initialize as empty\n", + " self.reintro_eligibility = reintro_eligibility # initialize as empty\n", + " self.snaps_CIR = snaps_CIR # initialize as empty\n", + " self.snaps_end = snaps_end # initialize as empty\n", + "\n", + "# make an instance of Scenario for CA hindcast starting in 2012Q4\n", + "scenario_CA = Scenario_juris(\n", + " avail_accum=prmt.standard_MI_empty.copy(),\n", + " cur_sell_out_counter=0,\n", + " reintro_eligibility=False,\n", + " snaps_CIR=[],\n", + " snaps_end=[],\n", + ")\n", + "logging.info(\"created object scenario_CA\")\n", + "\n", + "# make an instance of Scenario for QC hindcast starting in 2013Q4\n", + "scenario_QC = Scenario_juris(\n", + " avail_accum=prmt.standard_MI_empty.copy(),\n", + " cur_sell_out_counter=0,\n", + " reintro_eligibility=False, \n", + " snaps_CIR=[],\n", + " snaps_end=[],\n", + ")\n", + "logging.info(\"created object scenario_QC\")\n", + "\n", + "# ~~~~~~~~~~~\n", + "# create class Em_pct\n", + "class Em_pct:\n", + " def __init__(self, slider): # default\n", + " self.slider = slider # this attribute will be set equal to a widget, which has various properties (value, etc.)\n", + "\n", + "# create new objects (instances of Em_pct); these are empty but will be filled by fn emissions_pct_sliders\n", + "em_pct_CA_simp = Em_pct([])\n", + "em_pct_QC_simp = Em_pct([])\n", + "em_pct_CA_adv1 = Em_pct([]) # period 1, i.e., 2019-2020\n", + "em_pct_QC_adv1 = Em_pct([])\n", + "em_pct_CA_adv2 = Em_pct([]) # period 2, i.e., 2021-2025\n", + "em_pct_QC_adv2 = Em_pct([])\n", + "em_pct_CA_adv3 = Em_pct([]) # period 3, i.e., 2026-2030\n", + "em_pct_QC_adv3 = Em_pct([])\n", + "\n", + "# ~~~~~~~~~~~\n", + "class Em_text_input_CAQC():\n", + " def 
__init__(self, wid):\n", + " self.wid = wid # this attribute will be set equal to a widget, which has various properties (value, etc.)\n", + "\n", + "# create new object (instance of the class Em_text_input_CAQC)\n", + "# starts empty, but will be filled by fn ______\n", + "em_text_input_CAQC_obj = Em_text_input_CAQC([]) \n", + "\n", + "# ~~~~~~~~~~~\n", + "class Years_not_sold_out:\n", + " def __init__(self, wid):\n", + " self.wid = wid # this attribute will be set equal to a widget, which has various properties (value, etc.)\n", + " \n", + "# create new object (instance of the class Years_not_sold_out)\n", + "# starts empty, but will be filled by fn create_auction_tabs\n", + "years_not_sold_out_obj = Years_not_sold_out([])\n", + "\n", + "# ~~~~~~~~~~~\n", + "class Fract_not_sold():\n", + " def __init__(self, wid):\n", + " self.wid = wid # this attribute will be set equal to a widget, which has various properties (value, etc.)\n", + " \n", + "# create new object (instance of the class Fract_not_sold)\n", + "# starts empty, but will be filled by fn create_auction_tabs\n", + "fract_not_sold_obj = Fract_not_sold([])\n", + "\n", + "# ~~~~~~~~~~~\n", + "# create class Off_pct\n", + "class Off_pct:\n", + " def __init__(self, slider): # default\n", + " self.slider = slider # this attribute will be set equal to a widget, which has various properties (value, etc.)\n", + "\n", + "# create new objects (instances of off_pct); these are empty but will be filled by fn create_offsets_pct_sliders\n", + "off_pct_of_limit_CAQC = Off_pct([])\n", + "\n", + "off_pct_CA_adv1 = Off_pct([]) # period 1, i.e., 2019-2020\n", + "off_pct_QC_adv1 = Off_pct([])\n", + "\n", + "off_pct_CA_adv2 = Off_pct([]) # period 2, i.e., 2021-2025\n", + "off_pct_QC_adv2 = Off_pct([])\n", + "\n", + "off_pct_CA_adv3 = Off_pct([]) # period 3, i.e., 2026-2030\n", + "off_pct_QC_adv3 = Off_pct([])\n", + "\n", + "# ~~~~~~~~~~~\n", + "# define class Progress_bar\n", + "class Progress_bar:\n", + " bar = \"\" \n", + " 
\n", + " def __init__(self, wid):\n", + " self.wid = wid # object will be instantiated using iPython widget as wid\n", + " \n", + " def create_progress_bar(wid):\n", + " progress_bar = Progress_bar(wid)\n", + " return progress_bar\n", + "\n", + "# create objects\n", + "progress_bar_CA = Progress_bar.create_progress_bar(\n", + " widgets.IntProgress(\n", + " value=prmt.progress_bar_CA_count,\n", + " min=0,\n", + " max=len(prmt.CA_quarters),\n", + " step=1,\n", + " description='California:',\n", + " bar_style='', # 'success', 'info', 'warning', 'danger' or ''\n", + " orientation='horizontal',\n", + " ))\n", + "\n", + "progress_bar_QC = Progress_bar.create_progress_bar(\n", + " widgets.IntProgress(\n", + " value=prmt.progress_bar_QC_count,\n", + " min=0,\n", + " max=len(prmt.QC_quarters),\n", + " step=1,\n", + " description='Quebec:',\n", + " bar_style='', # 'success', 'info', 'warning', 'danger' or ''\n", + " orientation='horizontal',\n", + " ))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# START OF MODEL RUN" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "## USER PARAMETERS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def assign_EIM_retirements():\n", + " \"\"\"\n", + " Assign quantities for EIM Outstanding Emissions retirements in 2018, 2019, and 2020.\n", + " \n", + " These are for EIM Outstanding Emissions incurred in 2017, 2018, and 2019Q1.\n", + " \n", + " As of Oct. 
2018, there was no clear data on quantities to be retired for EIM Outstanding Emissions.\n", + " \n", + " Therefore values here are set to zero until more information is available.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " EIM_retirements_dict = {2018: 0, \n", + " 2019: 0, \n", + " 2020: 0 / 4}\n", + " \n", + " EIM_retirements = pd.Series(EIM_retirements_dict)\n", + " EIM_retirements.name = 'EIM_retirements'\n", + " EIM_retirements.index.name = 'year processed'\n", + " \n", + " return(EIM_retirements)\n", + " \n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "def assign_bankruptcy_retirements():\n", + " \"\"\"\n", + " Handling of bankruptcy retirements based on \"2018 Regulation Documents (Narrow Scope)\": \n", + " https://www.arb.ca.gov/regact/2018/capandtradeghg18/capandtradeghg18.htm\n", + " \n", + " Quantity for 2019 based on ARB statement in ARB, \"Supporting Material for Assessment of Post-2020 Caps\" (Apr 2018):\n", + " https://www.arb.ca.gov/cc/capandtrade/meetings/20180426/carb_post2020caps.pdf\n", + " \"Approximately 5 million allowances to be retired in response to a recent bankruptcy\"\n", + " \"\"\"\n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # bankruptcy retirements (units MMTCO2e)\n", + " # add additional key and value pairs below, if more bankruptcies are identified\n", + " bankruptcy_retirements_dict = {2019: 5}\n", + "\n", + " bankruptcy_retirements = pd.Series(bankruptcy_retirements_dict)\n", + " bankruptcy_retirements.name = 'bankruptcy_retirements'\n", + " bankruptcy_retirements.index.name = 'year processed'\n", + " \n", + " return(bankruptcy_retirements)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_all_accts():\n", + " \"\"\"\n", + " Create version of df all_accts for start of model run, for each juris (CA & 
QC).\n", + " \n", + " What is in this df at start of run depends on the time point at which the model run begins.\n", + " \n", + " Default is to use historical data + projection of all auctions selling out. \n", + " \n", + " Model may run as forecast, in which case it defaults to pre-run results for all auctions selling out.\n", + " \n", + " Or model may run as hindcast + forecast, in which case it repeats historical steps.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " # if there are user settings from online model, override defaults above\n", + " # but do this only if years_not_sold_out is *not* and empty list & prmt.fract_not_sold > 0.0\n", + " # if prmt.years_not_sold_out is empty and/or prmt.fract_not_sold==1.0, then don't need to do run\n", + " # already have results in pre-run scenario in which all auctions after 2018Q3 sell out\n", + "\n", + " # should be able to remove wrapping if statement below; \n", + " # this func (initialize_all_accts) is called only if these conditions were already found to be true\n", + " \n", + " if prmt.run_hindcast == True: \n", + " # for initial conditions of market\n", + " # set attributes of objects scenario_CA and scenario_QC\n", + " # note that scenario attribute snaps_end is a *list* of dfs\n", + " scenario_CA.avail_accum = prmt.standard_MI_empty.copy()\n", + " scenario_CA.cur_sell_out_counter = 0\n", + " scenario_CA.reintro_eligibility = False\n", + " scenario_CA.snaps_end = []\n", + " logging.info(\"initialized scenario_CA attributes for hindcast\")\n", + " \n", + " scenario_QC.avail_accum = prmt.standard_MI_empty.copy()\n", + " scenario_QC.cur_sell_out_counter = 0\n", + " scenario_QC.reintro_eligibility = False\n", + " scenario_QC.snaps_end = []\n", + " logging.info(\"initialized scenario_QC attributes for hindcast\")\n", + "\n", + " # initialize all_accts_CA & all_accts_QC\n", + " all_accts_CA = prmt.standard_MI_empty.copy()\n", + " all_accts_QC = 
prmt.standard_MI_empty.copy()\n", + "\n", + " elif prmt.run_hindcast == False and prmt.years_not_sold_out != () and prmt.fract_not_sold > 0.0: \n", + " \n", + " # DELETE; no need to reinitialize\n", + "# # reinitialize all_accts_CA & all_accts_QC\n", + "# all_accts_CA = prmt.standard_MI_empty.copy()\n", + "# all_accts_QC = prmt.standard_MI_empty.copy()\n", + "\n", + " # get the first year of projection with auctions that don't sell out\n", + " first_proj_yr_not_sold_out = prmt.years_not_sold_out[0]\n", + "\n", + " # set new values for start dates\n", + " # use default projection (all sell out) for all years that sell out\n", + " # start calculating projection from first year that doesn't sell out\n", + " prmt.CA_start_date = quarter_period(f\"{first_proj_yr_not_sold_out}Q1\")\n", + " prmt.QC_start_date = quarter_period(f\"{first_proj_yr_not_sold_out}Q1\")\n", + " \n", + " # CA: generate (revised) list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " prmt.CA_quarters = pd.date_range(start=quarter_period(prmt.CA_start_date).to_timestamp(), \n", + " end=quarter_period(prmt.CA_end_date).to_timestamp() + DateOffset(months=3),\n", + " freq='Q').to_period('Q')\n", + "\n", + " # QC: generate (revised) list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " prmt.QC_quarters = pd.date_range(start=quarter_period(prmt.QC_start_date).to_timestamp(),\n", + " end=quarter_period(prmt.QC_end_date).to_timestamp() + DateOffset(months=3),\n", + " freq='Q').to_period('Q')\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # get output of model results for historical + projection of all sell out \n", + " # get from object attribute prmt.snaps_end_Q4\n", + " \n", + " # create mask for only the years snap_q.year < first_proj_yr_not_sold_out\n", + " # 
prmt.snaps_end_Q4 has snaps_q as column next to 'quant'\n", + " # this makes it ready to use as attribute (Scenario.snaps_end)\n", + " test_snaps_end_Q4_sum()\n", + " up_to_year_mask = prmt.snaps_end_Q4['snap_q'].dt.year < first_proj_yr_not_sold_out\n", + "\n", + " # create masks for each juris\n", + " CA_mask = prmt.snaps_end_Q4.index.get_level_values('juris') == 'CA'\n", + " QC_mask = prmt.snaps_end_Q4.index.get_level_values('juris') == 'QC'\n", + " \n", + " # use these masks below to set object attributes scenario_CA.snaps_end and scenario_QC.snaps_end\n", + " \n", + " # ~~~~~~~\n", + " \n", + " # snaps_CIR\n", + " # if restoring snaps_CIR, follow same pattern as above for snaps_end\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # TO DO: add avail_accum from all sell out scenario\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # set attributes of objects scenario_CA and scenario_QC\n", + " # note that scenario attribute snaps_end is a *list* of dfs\n", + " scenario_CA.avail_accum = prmt.standard_MI_empty.copy() # need to fill in with results prior to start_date\n", + " scenario_CA.cur_sell_out_counter = 5 # was 4 after 2018Q3; would be > 4 if more auctions sold out\n", + " scenario_CA.reintro_eligibility = True\n", + " scenario_CA.snaps_end = [prmt.snaps_end_Q4.loc[(up_to_year_mask) & (CA_mask)]]\n", + " logging.info(\"updated scenario_CA attributes\")\n", + " \n", + " scenario_QC.avail_accum = prmt.standard_MI_empty.copy() # need to fill in with results prior to start_date\n", + " scenario_QC.cur_sell_out_counter = 5 # was 4 after 2018Q3; would be > 4 if more auctions sold out\n", + " scenario_QC.reintro_eligibility = True\n", + " scenario_QC.snaps_end = [prmt.snaps_end_Q4.loc[(up_to_year_mask) & (QC_mask)]]\n", + " logging.info(\"updated scenario_QC attributes\")\n", + "\n", + " # create mask to choose 1 quarter to use as starting point for model run\n", + " one_quarter_mask = 
prmt.snaps_end_Q4['snap_q'].dt.year == first_proj_yr_not_sold_out - 1\n", + "\n", + " # use together with jurisdiction masks created earlier\n", + " \n", + " # select only the one quarter; drop column 'snap_q'\n", + " all_accts_CA = prmt.snaps_end_Q4.loc[(one_quarter_mask) & (CA_mask)]\n", + " all_accts_CA = all_accts_CA.drop(columns=['snap_q'])\n", + " \n", + " all_accts_QC = prmt.snaps_end_Q4.loc[(one_quarter_mask) & (QC_mask)]\n", + " all_accts_QC = all_accts_QC.drop(columns=['snap_q'])\n", + " \n", + " elif prmt.run_hindcast == False and prmt.years_not_sold_out != () and prmt.fract_not_sold > 0.0:\n", + " # use pre-run scenario for supply-demand balance\n", + " # drawn directly from prmt.snaps_end_Q4 [ck]\n", + " pass\n", + " \n", + " else:\n", + " print(\"Unknown condition; shouldn't have reached here.\")\n", + " \n", + " # end of \"prmt.years_not_sold_out != () ... \"\n", + " \n", + " # DELETE: don't need this additional UI step; this function goes quickly now\n", + "# print(\"Processing auctions...\", end=' ') # for UI\n", + " \n", + " # note: after this function finishes, model next runs process_auctions_CA_QC, process_CA & process_QC\n", + " # then goes into supply_demand_calculations\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts_CA, all_accts_QC)\n", + "\n", + "# end of initialize_all_accts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def progress_bars_initialize_and_display():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # for CA, create progress bar using iPython widget IntProgress\n", + " # at end of each quarter, value increases by 1\n", + " prmt.progress_bar_CA_count = 0 # reinitialize widget progress bar count\n", + " progress_bar_CA.wid.value = prmt.progress_bar_CA_count\n", + " progress_bar_CA.wid.max = len(prmt.CA_quarters)\n", + " \n", + " 
display(progress_bar_CA.wid) # display the bar\n", + "\n", + " # for QC, create progress bar using iPython widget IntProgress\n", + " # at end of each quarter, value increases by 1\n", + " prmt.progress_bar_QC_count = 0 # reinitialize widget progress bar\n", + " progress_bar_QC.wid.value = prmt.progress_bar_QC_count\n", + " progress_bar_QC.wid.max = len(prmt.QC_quarters)\n", + "\n", + " display(progress_bar_QC.wid) # display the bar\n", + " \n", + " # note: bars' values are updated at end of each step through loop\n", + " \n", + "# end of progress_bars_initialize_and_display" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## California" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# %%snakeviz\n", + "\n", + "def process_CA(all_accts_CA):\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # set cq for CA\n", + " # in initialize_all_accts, if online user settings, then sets new value for CA_start_date \n", + " # (using first_proj_yr_not_sold_out)\n", + " cq.date = prmt.CA_start_date\n", + "\n", + " for quarter_year in prmt.CA_quarters:\n", + " logging.info(f\"******** start of {quarter_year} ********\")\n", + "\n", + " # ONE-OFF STEPS:\n", + " # PREP FOR process_CA_quarterly\n", + " if cq.date == quarter_period('2012Q4'):\n", + " all_accts_CA = initialize_CA_auctions(all_accts_CA)\n", + " else:\n", + " pass\n", + " \n", + " # decadal creation of allowances & transfers\n", + " if cq.date == quarter_period('2018Q1'):\n", + " # occurs before process_CA_quarterly for 2018Q1, and therefore included in 2018Q1 snap\n", + " # by including it here before any other steps in 2018, then can have consistent budget for 2018\n", + "\n", + " # create CA, QC, and ON allowances v2021-v2030, put into alloc_hold\n", + " all_accts_CA = create_annual_budgets_in_alloc_hold(all_accts_CA, prmt.CA_cap.loc[2021:2030])\n", + "\n", + " # 
transfer advance into auct_hold\n", + " all_accts_CA = transfer__from_alloc_hold_to_specified_acct(all_accts_CA, prmt.CA_advance_MI, \n", + " 2021, 2030)\n", + "\n", + " # projection that APCR 2021-2030 will occur in 2018Q4\n", + " elif cq.date == quarter_period('2018Q4'): \n", + " # transfer CA APCR allowances out of alloc_hold, into APCR_acct (for vintages 2021-2030)\n", + " # (not yet; CA APCR allowances still in alloc_hold, as of 2018Q2 CIR)\n", + " # note: quantities in APCR 2021-2030 are affected by model setting for CA_post_2020_regs\n", + " all_accts_CA = transfer__from_alloc_hold_to_specified_acct(all_accts_CA, prmt.CA_APCR_MI, \n", + " 2021, 2030)\n", + " else:\n", + " pass\n", + "\n", + " # ***** PROCESS QUARTER FOR cq.date (START) *****\n", + "\n", + " all_accts_CA = process_CA_quarterly(all_accts_CA)\n", + "\n", + " # ***** PROCESS QUARTER FOR cq.date (END) *****\n", + "\n", + " # update progress bar\n", + " if progress_bar_CA.wid.value <= len(prmt.CA_quarters):\n", + " progress_bar_CA.wid.value += 1\n", + " \n", + " # at end of each quarter, step cq.date to next quarter\n", + " cq.step_to_next_quarter()\n", + "\n", + " logging.info(f\"******** end of {cq.date} ********\")\n", + " logging.info(\"------------------------------------\")\n", + " \n", + " # end of loop \"for quarter_year in prmt.CA_quarters:\"\n", + " \n", + " return(all_accts_CA)\n", + "# end of process CA quarters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Quebec" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# %%snakeviz\n", + "\n", + "def process_QC(all_accts_QC):\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # initialize cq.date to QC_start_date\n", + " # in initialize_all_accts, if online user settings, then sets new value for QC_start_date \n", + " # (using first_proj_yr_not_sold_out)\n", + " cq.date = 
prmt.QC_start_date\n", + " \n", + " for quarter_year in prmt.QC_quarters:\n", + " logging.info(f\"******** start of {cq.date} ********\")\n", + "\n", + " # one-off steps **************************\n", + " if cq.date == quarter_period('2013Q4'):\n", + " # initialize QC auctions\n", + " all_accts_QC = initialize_QC_auctions_2013Q4(all_accts_QC)\n", + "\n", + " elif cq.date == quarter_period('2014Q1'):\n", + " # Early Action allowances distributed 2014Q1\n", + " all_accts_QC = QC_early_action_distribution(all_accts_QC)\n", + " \n", + " elif cq.date == quarter_period('2018Q1'):\n", + " # occurs before process_QC_quarterly for 2018Q1, and therefore included in 2018Q1 snap\n", + " # by including it here before any other steps in 2018, then can have consistent budget for 2018\n", + " \n", + " # decadal creation of allowances & transfers\n", + " # create QC, QC, and ON allowances v2021-v2030, put into alloc_hold\n", + " all_accts_QC = create_annual_budgets_in_alloc_hold(all_accts_QC, prmt.QC_cap.loc[2021:2030])\n", + "\n", + " # transfer QC APCR allowances out of alloc_hold, into APCR_acct (for vintages 2021-2030)\n", + " # (QC APCR allowances still in alloc_hold, as of 2018Q2 CIR)\n", + " all_accts_QC = transfer__from_alloc_hold_to_specified_acct(all_accts_QC, prmt.QC_APCR_MI, 2021, 2030)\n", + "\n", + " # transfer advance into auct_hold\n", + " all_accts_QC = transfer__from_alloc_hold_to_specified_acct(all_accts_QC, prmt.QC_advance_MI, 2021, 2030)\n", + " \n", + " else:\n", + " pass\n", + " # end of one-off steps **************************\n", + " \n", + "\n", + " # ***** PROCESS QUARTER FOR cq.date *****\n", + " \n", + " all_accts_QC = process_QC_quarterly(all_accts_QC)\n", + " \n", + " # ***** END OF PROCESS QUARTER FOR cq.date *****\n", + " \n", + " # update progress bar\n", + " if progress_bar_QC.wid.value <= len(prmt.QC_quarters):\n", + " progress_bar_QC.wid.value += 1\n", + " \n", + " # at end of each quarter, move cq.date to next quarter\n", + " 
cq.step_to_next_quarter()\n", + " \n", + " logging.info(f\"******** end of {cq.date} ********\")\n", + " logging.info(\"------------------------------------\")\n", + " \n", + " # end of loops \"for quarter_year in prmt.QC_quarters:\"\n", + " \n", + " return(scenario_QC, all_accts_QC)\n", + "\n", + "# end of process QC quarters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CA-QC results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_auctions_CA_QC():\n", + " \"\"\"\n", + " Overall function to run all initialization steps, then auctions etc. for CA & QC.\n", + " \n", + " Default is to revert to pre-run scenario in which all auctions after 2018Q3 sell out.\n", + " \n", + " Only run auctions if there are auctions that do not sell out.\n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " print(\"Running auctions:\", end=' ') # for UI\n", + " \n", + " # initialize all_accts for both CA & QC\n", + " all_accts_CA, all_accts_QC = initialize_all_accts()\n", + "\n", + " # create progress bars using updated start dates and quarters\n", + " progress_bars_initialize_and_display()\n", + "\n", + " # process quarters for CA & QC\n", + " all_accts_CA = process_CA(all_accts_CA)\n", + " all_accts_QC = process_QC(all_accts_QC)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts_CA, all_accts_QC)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "if prmt.run_hindcast == True:\n", + " # then do hindcast from the start of the WCI system\n", + " # (model default is for prmt.run_hindcast = False)\n", + " all_accts_CA, all_accts_QC = process_auctions_CA_QC()\n", + "else: \n", + " # then prmt.run_hindcast == False\n", + " # using pre-run scenario prmt.snaps_end_Q4 (in 
which all sell out) as starting point\n", + " # all_accts_CA, all_accts_QC were set in initialize_all_accts\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# BANKING METRIC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def emissions_projection():\n", + " \"\"\"\n", + " Calculate projection for covered emissions based on user settings.\n", + " \n", + " Default is -2%/year change for both CA and QC.\n", + " \n", + " Default based on PATHWAYS projection in Scoping Plan case that \"covered sector\" emissions change would be ~ -2%/yr.\n", + " \n", + " Although \"covered sector\" emissions ~10% higher than covered emissions, for annual change it's a good proxy.\n", + " \n", + " Model assumes QC will make same annual change as CA.\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + "\n", + " cov_em_df = pd.read_excel(prmt.input_file, sheet_name='covered emissions')\n", + " cov_em_df = cov_em_df.drop(['source CA', 'source QC', 'units'], axis=1)\n", + " cov_em_df = cov_em_df.set_index('year')\n", + "\n", + " # convert each into Series\n", + " CA_em_hist = cov_em_df['CA'].dropna()\n", + " QC_em_hist = cov_em_df['QC'].dropna()\n", + "\n", + " # create new Series, which will have projections appended\n", + " CA_em_all = CA_em_hist.copy()\n", + " QC_em_all = QC_em_hist.copy()\n", + "\n", + " last_hist_year = CA_em_hist.index[-1]\n", + " \n", + " if emissions_tabs.selected_index == 0:\n", + " # simple settings\n", + " logging.info(\"using emissions settings (simple)\")\n", + " # get user specified emissions annual change\n", + " # calculate emissions trajectories to 2030\n", + " for year in range(last_hist_year+1, 2030+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + em_pct_CA_simp.slider.value)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + em_pct_QC_simp.slider.value)\n", + " \n", + " elif 
emissions_tabs.selected_index == 1:\n", + " # advanced settings\n", + " logging.info(\"using emissions settings (advanced)\")\n", + " for year in range(last_hist_year+1, 2020+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + em_pct_CA_adv1.slider.value)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + em_pct_QC_adv1.slider.value)\n", + " \n", + " for year in range(2020+1, 2025+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + em_pct_CA_adv2.slider.value)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + em_pct_QC_adv2.slider.value)\n", + " \n", + " for year in range(2025+1, 2030+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + em_pct_CA_adv3.slider.value)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + em_pct_QC_adv3.slider.value)\n", + " \n", + " elif emissions_tabs.selected_index == 2: \n", + " # custom scenario input through text box\n", + " custom = parse_emissions_text(em_text_input_CAQC_obj.wid.value)\n", + " \n", + " if isinstance(custom, str):\n", + " if custom == 'blank' or custom == 'missing_slash_t' or custom == 'misformatted':\n", + " # revert to default; relevant error_msg set in parse_emissions_text\n", + "\n", + " # calculate default\n", + " for year in range(last_hist_year+1, 2030+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + -0.02)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + -0.02)\n", + " else:\n", + " error_msg = \"Error\" + \"! Unknown problem with input (possibly formatting issue). Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg] \n", + "\n", + " elif isinstance(custom, pd.Series):\n", + " if custom.index.min() > 2017 or custom.index.max() < 2030:\n", + " # projection is missing years\n", + "\n", + " error_msg = \"Error\" + \"! Projection needs to cover each year from 2017 to 2030. 
Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + "\n", + " # calculate default\n", + " for year in range(last_hist_year+1, 2030+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + -0.02)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + -0.02)\n", + "\n", + " elif custom.index.min() <= 2017 and custom.index.max() >= 2030:\n", + " # projection has all needed years\n", + "\n", + " # keep only years from 2017 to 2030\n", + " custom = custom.loc[(custom.index >= 2017) & (custom.index <= 2030)]\n", + "\n", + " # *** ASSUMPTION ***\n", + " # assume that CA emissions are a proportional share of the projected CA+QC emissions\n", + " # proportion is based on CA portion of CA+QC caps (~84.6%) over projection period 2017-2030\n", + " CA_caps_2017_2030 = prmt.CA_cap.loc[2017:2030].sum()\n", + " CAQC_caps_2017_2030 = pd.concat([prmt.CA_cap.loc[2017:2030], prmt.QC_cap.loc[2017:2030]]).sum()\n", + " CA_proportion = CA_caps_2017_2030 / CAQC_caps_2017_2030\n", + "\n", + " CA_em_all = CA_proportion * custom\n", + " QC_em_all = (1 - CA_proportion) * custom\n", + "\n", + " # fill in historical data; don't let user override historical data\n", + " CA_em_all = pd.concat([CA_em_hist.loc[2013:2016], CA_em_all], axis=0)\n", + " QC_em_all = pd.concat([QC_em_hist.loc[2013:2016], QC_em_all], axis=0)\n", + "\n", + " else:\n", + " error_msg = \"Error\" + \"! Unknown problem with input (possibly formatting issue). Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + "\n", + " # end of \"if custom == 'blank'...\"\n", + " \n", + " else: \n", + " # emissions_tabs.selected_index is not 0, 1, or 2\n", + " error_msg = \"Error\" + \"! Tab index is out of permitted range. 
Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " \n", + " # calculate default\n", + " for year in range(last_hist_year+1, 2030+1):\n", + " CA_em_all.at[year] = CA_em_all.at[year-1] * (1 + -0.02)\n", + " QC_em_all.at[year] = QC_em_all.at[year-1] * (1 + -0.02)\n", + "\n", + " # end of \"if emissions_tabs.selected_index == 0:\"\n", + " \n", + " # set attributes (need jurisdiction emissions for offset calculations)\n", + " prmt.emissions_ann_CA = CA_em_all\n", + " prmt.emissions_ann_QC = QC_em_all\n", + " prmt.emissions_ann = pd.concat([CA_em_all, QC_em_all], axis=1).sum(axis=1)\n", + " \n", + " # set names for all series\n", + " prmt.emissions_ann.name = 'emissions_ann'\n", + " prmt.emissions_ann_CA.name = 'emissions_ann_CA'\n", + " prmt.emissions_ann_QC.name = 'emissions_ann_QC'\n", + " \n", + " # no returns; sets attributes\n", + "# end of emissions_projection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def offsets_projection():\n", + " \"\"\"\n", + " DOCSTRING\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + " \n", + " offsets_sold_hist_cumul = prmt.CIR_offsets_q_sums[['General', 'Compliance', 'Retirement']].sum(axis=1)\n", + " \n", + " # BANKING METRIC: supply: offsets\n", + " # B = A' + N + **O** - E\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # HISTORICAL \n", + " # get quarterly values derived from CIR\n", + " offsets_sold_q = offsets_sold_hist_cumul.diff()\n", + " \n", + " # if a year has only partial quarterly data, make projection for remainder of year\n", + " last_hist_date = offsets_sold_q.index[-1]\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # PROJECTION TO FILL OUT REMAINING QUARTERS IN YEAR WITH PARTIAL DATA\n", + " if last_hist_date.year == 2018:\n", + " # hard-code in projection for 2018\n", + " # because 
we know there was a very large forestry project issuance in 2017Q4-2018Q1\n", + " for quarter in range(last_hist_date.quarter+1, 4+1):\n", + " # assume after Q2, same as Q2\n", + "\n", + " # get 2018Q2 value\n", + " offsets_2018Q2 = offsets_sold_q.at[quarter_period('2018Q2')]\n", + "\n", + " # calculate projection\n", + " year_q = quarter_period(f'2018Q{quarter}')\n", + " offsets_sold_q.at[year_q] = offsets_2018Q2\n", + "\n", + " else:\n", + " for quarter in range(last_hist_date.quarter+1, 4+1):\n", + " # use average of previous 4 quarters\n", + " offsets_priv_past_4Q = offsets_sold_q.loc[offsets_sold_q.index[-4]:]\n", + " offsets_priv_past_4Q_avg = offsets_priv_past_4Q.sum() / 4\n", + " \n", + " logging.info(\"offsets_priv_past_4Q_avg: {offsets_priv_past_4Q_avg}\")\n", + "\n", + " # use average values to calculate\n", + " year_q = quarter_period(f'{last_hist_date.year}Q{quarter}')\n", + " offsets_sold_q.at[year_q] = offsets_priv_past_4Q_avg\n", + "\n", + " # end of projection to fill out last historical year with partial data\n", + "\n", + " # calculate annual offset totals\n", + " offsets_sold_ann = offsets_sold_q.resample('A').sum()\n", + " offsets_sold_ann.index = offsets_sold_ann.index.year.astype(int)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # PROJECTION BEYOND LAST YEAR WITH HISTORICAL DATA\n", + " # use ARB's assumption for 2021-2030, from Post-2020 analyis\n", + " prmt.offset_rate_fract_of_limit = 0.75\n", + " \n", + " # note that this is about the same as what historical analysis through 2018Q2 gives\n", + " # from 2018Q2 CIR, ~100 M offsets cumulative supply\n", + " # cumulative CA-QC covered emissions through 2018Q2 were 1666 M ***if*** emissions fell 2%/yr after 2016\n", + " # which means offsets issued through 2018Q2 were about 6.0% of emissions\n", + " # with the offset limit for CA & QC over this period set at 8%, \n", + " # then the issuance is 75% of the limit\n", + " # and that's the same as ARB's assumption 
(although ARB didn't state a rationale)\n", + " \n", + " # the offset_rate_fract_of_limit above also sets the default value in offset sliders\n", + "\n", + " # ~~~~~~~~~\n", + " \n", + " # get values from sliders \n", + " # (before user does first interaction, will be based on default set above)\n", + " \n", + " # print(f\"show offsets_tabs.selected_index for default case: {offsets_tabs.selected_index}\")\n", + " \n", + " if offsets_tabs.selected_index == 0:\n", + " # simple settings\n", + " logging.info(\"using offsets settings (simple)\")\n", + " \n", + " # get user specified offsets use rate, as % of limit (same rate for all periods)\n", + " for year in range(last_hist_date.year+1, 2020+1):\n", + " # for CA & QC together\n", + " offset_rate_ann = off_pct_of_limit_CAQC.slider.value * 0.08\n", + " offsets_sold_ann.at[year] = prmt.emissions_ann.at[year] * offset_rate_ann\n", + " \n", + " for year in range(2020+1, 2025+1):\n", + " # for CA & QC separately\n", + " offset_rate_CA = off_pct_of_limit_CAQC.slider.value * 0.04\n", + " offset_rate_QC = off_pct_of_limit_CAQC.slider.value * 0.08\n", + " \n", + " offsets_sold_ann_CA_1y = prmt.emissions_ann_CA.at[year] * offset_rate_CA\n", + " offsets_sold_ann_QC_1y = prmt.emissions_ann_QC.at[year] * offset_rate_QC\n", + " \n", + " # combine CA & QC\n", + " offsets_sold_ann.at[year] = offsets_sold_ann_CA_1y + offsets_sold_ann_QC_1y\n", + " \n", + " for year in range(2025+1, 2030+1):\n", + " # for CA & QC separately\n", + " offset_rate_CA = off_pct_of_limit_CAQC.slider.value * 0.06\n", + " offset_rate_QC = off_pct_of_limit_CAQC.slider.value * 0.08\n", + " \n", + " offsets_sold_ann_CA_1y = prmt.emissions_ann_CA.at[year] * offset_rate_CA\n", + " offsets_sold_ann_QC_1y = prmt.emissions_ann_QC.at[year] * offset_rate_QC\n", + " \n", + " # combine CA & QC\n", + " offsets_sold_ann.at[year] = offsets_sold_ann_CA_1y + offsets_sold_ann_QC_1y\n", + " \n", + " elif offsets_tabs.selected_index == 1:\n", + " # advanced settings\n", + " 
logging.info(\"using offsets settings (advanced)\")\n", + " \n", + " for year in range(last_hist_date.year+1, 2020+1):\n", + " # for CA & QC separately, using period 1 sliders\n", + " offset_rate_CA = off_pct_CA_adv1.slider.value\n", + " offset_rate_QC = off_pct_QC_adv1.slider.value\n", + " \n", + " offsets_sold_ann_CA_1y = prmt.emissions_ann_CA.at[year] * offset_rate_CA\n", + " offsets_sold_ann_QC_1y = prmt.emissions_ann_QC.at[year] * offset_rate_QC\n", + " \n", + " # combine CA & QC\n", + " offsets_sold_ann.at[year] = offsets_sold_ann_CA_1y + offsets_sold_ann_QC_1y\n", + " \n", + " for year in range(2020+1, 2025+1):\n", + " # for CA & QC separately, using period 2 sliders\n", + " offset_rate_CA = off_pct_CA_adv2.slider.value\n", + " offset_rate_QC = off_pct_QC_adv2.slider.value\n", + " \n", + " offsets_sold_ann_CA_1y = prmt.emissions_ann_CA.at[year] * offset_rate_CA\n", + " offsets_sold_ann_QC_1y = prmt.emissions_ann_QC.at[year] * offset_rate_QC\n", + " \n", + " # combine CA & QC\n", + " offsets_sold_ann.at[year] = offsets_sold_ann_CA_1y + offsets_sold_ann_QC_1y\n", + " \n", + " for year in range(2025+1, 2030+1):\n", + " # for CA & QC separately, for period 3 sliders\n", + " offset_rate_CA = off_pct_CA_adv3.slider.value\n", + " offset_rate_QC = off_pct_QC_adv3.slider.value\n", + " \n", + " offsets_sold_ann_CA_1y = prmt.emissions_ann_CA.at[year] * offset_rate_CA\n", + " offsets_sold_ann_QC_1y = prmt.emissions_ann_QC.at[year] * offset_rate_QC\n", + " \n", + " # combine CA & QC\n", + " offsets_sold_ann.at[year] = offsets_sold_ann_CA_1y + offsets_sold_ann_QC_1y\n", + "\n", + " else:\n", + " # offsets_tabs.selected_index is not 0 or 1\n", + " print(\"Error\" + \"! 
offsets_tabs.selected_index was not one of the expected values (0 or 1).\")\n", + "\n", + " offsets_sold_ann.name = 'offsets_sold_ann'\n", + " \n", + " # calculation of excess offsets beyond what could be used\n", + " # (depends on temporal pattern of when offsets are added to supply)\n", + " # sets prmt.excess_offsets\n", + " excess_offsets_calc(offsets_sold_ann)\n", + " \n", + " return(offsets_sold_ann)\n", + "# end of offsets_projection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "def excess_offsets_calc(offsets_sold_ann):\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " # notes:\n", + " # offsets_projection() returns offsets_sold_ann, which is a record of additions to offset supply annually\n", + " # (both historically and projected)\n", + "\n", + " # calculate cumsum to get gross cumulative supply\n", + " offsets_sold_cum = offsets_sold_ann.cumsum()\n", + "\n", + " # to calculate those available to use at any time, remove the historical retirements\n", + " # (those available at any time is cumsum of this series of \"offsets available\" minus retirements to date)\n", + " # generalized: get the retirements from CIR, remove those, starting from earliest offset \"vintage\"\n", + " # note: a more accurate approach might be to get retired offsets from compliance reports\n", + " df = prmt.CIR_offsets_q_sums.copy()\n", + " df = df.loc[df.index[-1:]]\n", + " off_priv_hist_retired = df['Retirement'].sum()\n", + "\n", + " # remove the retired from the cumulative supply; drop rows with negative values\n", + " # note: off_avail_cum will be updated below\n", + " off_avail_cum = offsets_sold_cum - off_priv_hist_retired\n", + "\n", + " # then make projection of max offset retirements, simulating compliance events\n", + " latest_hist_q = prmt.CIR_offsets_q_sums.index.max()\n", + "\n", + " first_year = 2015 # initialize projection of 
retirements\n", + "\n", + " # compliance period #2 obligations for emissions 2015-2017 (due Nov 1, 2018)\n", + " if latest_hist_q < quarter_period(f'{first_year+3}Q4'):\n", + " # latest CIR data is before 2018Q4, so simulate compliance period #2 obligations\n", + " # max offsets are 8% for both CA & QC; \n", + " # assume ARB allows emitters to go up to max for whole compliance period, regardless of what they retired earlier\n", + "\n", + " max_off_p2_CA = prmt.emissions_ann_CA.loc[first_year:first_year+2].sum() * 0.08\n", + " max_off_p2_QC = prmt.emissions_ann_QC.loc[first_year:first_year+2].sum() * 0.08\n", + "\n", + " # retired to date for compliance for CA for compliance period #2 \n", + " # get record of retirements from annual compliance reports for 2016 & 2017\n", + " df = pd.read_excel(prmt.input_file, sheet_name='annual compliance reports')\n", + " df = df.set_index('year of compliance event')\n", + " df = df.loc[df.index.isin(range(2013, 2030+1))]\n", + " df.index = df.index.astype(int)\n", + " off_ret_for_CA_2016_2017 = df.loc[first_year:first_year+1]['CA entities retired offsets'].sum() \n", + "\n", + " # CA entities: additional offsets required to be used to hit max\n", + " max_off_p2_CA_req = max_off_p2_CA - off_ret_for_CA_2016_2017\n", + "\n", + " # QC entities: no annual compliance periods, so required is same as max for the period \n", + "\n", + " # CA + QC: calculate the max offsets that could be used, given the offsets projection (offsets_sold_ann)\n", + " # minimum of those that would be required to get to max & offsets available at time of compliance event\n", + " # (given that CA entities already retired some offsets in 2016 & 2017, for emissions in 2015 & 2016)\n", + "\n", + " # as an approximation, assume that offsets available to use in Nov 1, 2018 compliance event...\n", + " # ... 
are all those issued through end of 2018; in reality, those issued Nov & Dec 2018 would not be available\n", + "\n", + " max_off_p2_given_off_proj = min((max_off_p2_CA_req + max_off_p2_QC), off_avail_cum.at[first_year+3])\n", + "\n", + "# if (max_off_p2_CA_req + max_off_p2_QC) > off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 2: Offset use would be limited by available offsets\")\n", + "# elif (max_off_p2_CA_req + max_off_p2_QC) <= off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 2: Offset use could be maxed out.\")\n", + " \n", + " # update off_avail_cum to remove max offset use, and remove rows with negative values\n", + " off_avail_cum = off_avail_cum - max_off_p2_given_off_proj\n", + "\n", + " else:\n", + " pass\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " first_year += 3 # step forward 3 years\n", + "\n", + " # compliance period #3 obligations for emissions 2018-2020 (due Nov 1, 2021)\n", + " if prmt.CIR_offsets_q_sums.index.max() < quarter_period(f'{first_year+3}Q4'):\n", + " # latest CIR data is before 2021Q4, so simulate compliance period #3 obligations\n", + " # max offsets are 8% for CA & QC; \n", + " # assume ARB allows emitters to go up to max for whole compliance period, regardless of what they retired earlier\n", + "\n", + " max_off_p3_CA = prmt.emissions_ann_CA.loc[first_year:first_year+2].sum() * 0.08\n", + " max_off_p3_QC = prmt.emissions_ann_QC.loc[first_year:first_year+2].sum() * 0.08\n", + "\n", + " # calculate the max offsets that could be used, given the offsets projection (offsets_sold_ann)\n", + " # minimum of theoretical max & offsets available at time of compliance event\n", + " max_off_p3_given_off_proj = min((max_off_p3_CA + max_off_p3_QC), off_avail_cum.at[first_year+3])\n", + "\n", + "# if (max_off_p3_CA + max_off_p3_QC) > off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 3: Offset use would be limited by available offsets\")\n", + "# elif (max_off_p3_CA + max_off_p3_QC) <= 
off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 3: Offset use could be maxed out.\")\n", + "\n", + " # update off_avail_cum to remove max offset use, and remove rows with negative values\n", + " off_avail_cum = off_avail_cum - max_off_p3_given_off_proj\n", + " else:\n", + " pass\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " first_year += 3 # step forward 3 years\n", + "\n", + " # compliance period #4 obligations for emissions 2021-2023 (due Nov 1, 2024)\n", + " if prmt.CIR_offsets_q_sums.index.max() < quarter_period(f'{first_year+3}Q4'):\n", + " # latest CIR data is before 2021Q4, so simulate compliance period #3 obligations\n", + " # max offsets are 8% for CA & QC; \n", + " # assume ARB allows emitters to go up to max for whole compliance period, regardless of what they retired earlier\n", + "\n", + " max_off_p4_CA = prmt.emissions_ann_CA.loc[first_year:first_year+2].sum() * 0.04\n", + " max_off_p4_QC = prmt.emissions_ann_QC.loc[first_year:first_year+2].sum() * 0.08\n", + "\n", + " # calculate the max offsets that could be used, given the offsets projection (offsets_sold_ann)\n", + " # minimum of theoretical max & offsets available at time of compliance event\n", + " max_off_p4_given_off_proj = min((max_off_p4_CA + max_off_p4_QC), off_avail_cum.at[first_year+3])\n", + "\n", + "# if (max_off_p4_CA + max_off_p4_QC) > off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 4: Offset use would be limited by available offsets\")\n", + "# elif (max_off_p4_CA + max_off_p4_QC) <= off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 4: Offset use could be maxed out.\")\n", + "\n", + " # update off_avail_cum to remove max offset use, and remove rows with negative values\n", + " off_avail_cum = off_avail_cum - max_off_p4_given_off_proj\n", + " else:\n", + " pass\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " first_year += 3 # step forward 3 years\n", + "\n", + " # compliance period #5 obligations for emissions 2024-2026 (due in 
full Nov 1, 2027)\n", + "\n", + " # ***IMPORTANT NOTE***\n", + " # this compliance period is anomalous for CA because of change in offset max use under AB 398 from 4% to 6%,\n", + " # which doesn't match with timing of compliance period deadline\n", + "\n", + " if prmt.CIR_offsets_q_sums.index.max() < quarter_period(f'{first_year+3}Q4'):\n", + " # latest CIR data is before 2021Q4, so simulate compliance period #3 obligations\n", + " # max offsets are 8% for CA & QC; \n", + " # assume ARB allows emitters to go up to max for whole compliance period, regardless of what they retired earlier\n", + "\n", + " max_off_CA_2024_2025 = prmt.emissions_ann_CA.loc[first_year:first_year+1].sum() * 0.04\n", + " max_off_CA_2026 = prmt.emissions_ann_CA.loc[first_year+2].sum() * 0.06\n", + " max_off_p5_QC = prmt.emissions_ann_QC.loc[first_year:first_year+2].sum() * 0.08\n", + "\n", + " max_off_p5 = max_off_CA_2024_2025 + max_off_CA_2026 + max_off_p5_QC\n", + "\n", + " # calculate the max offsets that could be used, given the offsets projection (offsets_sold_ann)\n", + " # minimum of theoretical max & offsets available at time of compliance event\n", + " max_off_p5_given_off_proj = min(max_off_p5, off_avail_cum.at[first_year+3])\n", + "\n", + "# if max_off_p5 > off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 5: Offset use would be limited by available offsets\")\n", + "# elif max_off_p5 <= off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 5: Offset use could be maxed out.\")\n", + "\n", + " # update off_avail_cum to remove max offset use, and remove rows with negative values\n", + " off_avail_cum = off_avail_cum - max_off_p5_given_off_proj\n", + " else:\n", + " pass\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " first_year += 3 # step forward 3 years\n", + "\n", + " # compliance period #6 obligations for emissions 2027-2029 (due Nov 1, 2030)\n", + "\n", + " if prmt.CIR_offsets_q_sums.index.max() < quarter_period(f'{first_year+3}Q4'):\n", + " # 
latest CIR data is before 2021Q4, so simulate compliance period #3 obligations\n", + " # max offsets are 8% for CA & QC; \n", + " # assume ARB allows emitters to go up to max for whole compliance period, regardless of what they retired earlier\n", + "\n", + " max_off_p6_CA = prmt.emissions_ann_CA.loc[first_year:first_year+2].sum() * 0.06\n", + " max_off_p6_QC = prmt.emissions_ann_QC.loc[first_year:first_year+2].sum() * 0.08\n", + "\n", + " # calculate the max offsets that could be used, given the offsets projection (offsets_sold_ann)\n", + " # minimum of theoretical max & offsets available at time of compliance event\n", + " max_off_p6_given_off_proj = min((max_off_p6_CA + max_off_p6_QC), off_avail_cum.at[first_year+3])\n", + "\n", + "# if (max_off_p6_CA + max_off_p6_QC) > off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 6: Offset use would be limited by available offsets\")\n", + "# elif (max_off_p6_CA + max_off_p6_QC) <= off_avail_cum.at[first_year+3]:\n", + "# print(\"Compliance period 6: Offset use could be maxed out.\")\n", + "\n", + " # update off_avail_cum to remove max offset use, and remove rows with negative values\n", + " off_avail_cum = off_avail_cum - max_off_p6_given_off_proj\n", + " else:\n", + " pass\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " if off_avail_cum.sum() > 0:\n", + " prmt.excess_offsets = off_avail_cum.at[2030]\n", + " \n", + " # round off for display\n", + " excess_int = int(round(prmt.excess_offsets, 0))\n", + " \n", + " error_msg_1 = \"Warning\" + \"! 
The scenario's settings led to excess offsets beyond what could be used by the end of 2030.\"\n", + " logging.info(error_msg_1)\n", + " prmt.error_msg_post_refresh += [error_msg_1]\n", + " error_msg_2 = f\"The excess of offsets was {excess_int} MMTCO2e.\"\n", + " logging.info(error_msg_2)\n", + " prmt.error_msg_post_refresh += [error_msg_2]\n", + " line_break = \" \"\n", + " prmt.error_msg_post_refresh += [line_break]\n", + " \n", + " else:\n", + " prmt.excess_offsets = 0\n", + " pass\n", + " \n", + " # no return; set object attribute above \n", + "\n", + "# end of excess_offsets_calc" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def supply_demand_calculations():\n", + " \"\"\"\n", + " For emissions and offsets, get values by calling functions within this func.\n", + " \n", + " For auctions, use object attributes scenario_CA.snaps_end & scenario_QC.snaps_end, as calculated in model run.\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~\n", + " # EMISSIONS\n", + " # sets attributes prmt.emissions_ann, prmt.emissions_ann_CA, prmt.emissions_ann_QC\n", + " emissions_projection()\n", + " \n", + " # AUCTIONS\n", + " # results stored in object attributes scenario_CA.snaps_end & scenario_QC.snaps_end\n", + " # (either default values or from new custom run)\n", + " # these object attributes are accessed directly by supply_demand_calculations\n", + " \n", + " # NET FLOW FROM ON:\n", + " # As noted in 2018Q2 CIR:\n", + " # \"As of that date, there are 13,186,967 more compliance instruments held in California and Québec accounts \n", + " # than the total number of compliance instruments issued by those two jurisdictions alone.\"\n", + " net_flow_from_ON = 13.186967\n", + " # added into allow_vintaged_cumul below, attributed to 2018\n", + " \n", + " # OFFSETS: calculated later in this func\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~\n", + 
" \n", + " # BANKING METRIC: supply: allowances\n", + "\n", + " # create local variable snaps_end_Q4_CA_QC; use different source depending on scenario/settings\n", + " # use copy to avoid modifying object attributes \n", + " # (either prmt.snaps_end_Q4 or scenario_CA.snaps_end & scenario_QC.snaps_end)\n", + " \n", + " if prmt.run_hindcast == True:\n", + " # use new auction results in scenario_CA.snaps_end & scenario_QC.snaps_end\n", + " # scenario_CA.snaps_end & scenario_QC.snaps_end are lists; concat all dfs in the combined list\n", + " df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "\n", + " # keep only Q4\n", + " snaps_end_Q4_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy()\n", + " \n", + " elif prmt.run_hindcast == False:\n", + " if prmt.years_not_sold_out == () or prmt.fract_not_sold == float(0):\n", + " # no new auction results; use default pre-run: prmt.snaps_end_Q4\n", + " test_snaps_end_Q4_sum()\n", + " snaps_end_Q4_CA_QC = prmt.snaps_end_Q4.copy()\n", + "\n", + " else:\n", + " # there are new auction results in scenario_CA.snaps_end & scenario_QC.snaps_end\n", + " # scenario_CA.snaps_end & scenario_QC.snaps_end are lists; concat all dfs in the combined list\n", + " df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "\n", + " # keep only Q4\n", + " snaps_end_Q4_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy()\n", + " \n", + " # create col 'snap_yr' to replace col 'snap_q'\n", + " snaps_end_Q4_CA_QC['snap_yr'] = snaps_end_Q4_CA_QC['snap_q'].dt.year\n", + " snaps_end_Q4_CA_QC = snaps_end_Q4_CA_QC.drop(columns=['snap_q'])\n", + "\n", + " # select only the allowances in private accounts (general account and compliance account)\n", + " private_acct_mask = snaps_end_Q4_CA_QC.index.get_level_values('acct_name').isin(['gen_acct', 'comp_acct'])\n", + " snaps_CAQC_toward_bank = snaps_end_Q4_CA_QC.loc[private_acct_mask]\n", + " \n", + " # ~~~~~~~~~~~~~~\n", + " # ### allowances with 
vintages\n", + " # B = **A'** + N + O - E\n", + "\n", + " # A': vintaged allowances sold or distributed\n", + " # (before retirements for compliance, as is the case with all_accts & snaps)\n", + "\n", + " # specifically, of vintages up to year of banking metric\n", + " # and excluding VRE allowances\n", + " # filter snaps for allowances in gen_acct & comp_acct excludes VRE & unsold\n", + " df = snaps_CAQC_toward_bank.copy()\n", + "\n", + " # mask1 = df.index.get_level_values('vintage') <= df.index.get_level_values('snap_q').year\n", + " mask1 = df.index.get_level_values('vintage') <= df['snap_yr']\n", + " mask2 = df.index.get_level_values('acct_name').isin(['gen_acct', 'comp_acct'])\n", + " # note that mask2 will get vintages up to banking metric year, and also filter out non-vintage allowances\n", + " mask = (mask1) & (mask2)\n", + " df = df.loc[mask]\n", + "\n", + " # result contains allowances sold at advance and current auctions, as well as allowances freely allocated\n", + "\n", + " df = df.groupby('snap_yr').sum()\n", + " df.index.name = 'snap_yr'\n", + " \n", + " # convert to Series by only selecting ['quant']\n", + " allow_vintaged_cumul = df['quant']\n", + " allow_vintaged_cumul.name = 'allow_vintaged_cumul'\n", + " \n", + " allow_vint_ann = allow_vintaged_cumul.copy().diff()\n", + " first_year = allow_vint_ann.index.min()\n", + " allow_vint_ann.at[first_year] = allow_vintaged_cumul.at[first_year]\n", + " allow_vint_ann.name = 'allow_vint_ann'\n", + " \n", + " # add net flow from ON to 2018\n", + " allow_vint_ann.at[2018] = allow_vint_ann.at[2018] + net_flow_from_ON\n", + " \n", + " # ~~~~~~~~~~~~~~\n", + " # ### allowances with no vintage (APCR, Early Action)\n", + " # B = A' + **N** + O - E\n", + "\n", + " # N: non-vintaged allowances (APCR and Early Action) in private accounts\n", + " # (before retirements for compliance, as is the case with all_accts & snaps)\n", + "\n", + " df = snaps_CAQC_toward_bank.copy()\n", + "\n", + " # APCR assigned vintage 
2200\n", + " # Early Action assigned vintage 2199\n", + "\n", + " mask1 = df.index.get_level_values('vintage') >= 2199\n", + " mask2 = df.index.get_level_values('acct_name').isin(['gen_acct', 'comp_acct'])\n", + "\n", + " mask = (mask1) & (mask2)\n", + " df = df.loc[mask]\n", + "\n", + " df = df.groupby('snap_yr').sum()\n", + " df.index.name = 'snap_yr'\n", + " allow_nonvint_cumul = df['quant']\n", + " allow_nonvint_cumul.name = 'allow_nonvint_cumul'\n", + "\n", + " allow_nonvint_ann = allow_nonvint_cumul.diff()\n", + " first_year = allow_nonvint_ann.index.min()\n", + " allow_nonvint_ann.at[first_year] = allow_nonvint_cumul.at[first_year]\n", + " allow_nonvint_ann.name = 'allow_nonvint_ann'\n", + "\n", + " # ~~~~~~~~~~~~~~\n", + " # OFFSET SUPPLY:\n", + " offsets_sold_ann = offsets_projection() \n", + " # ~~~~~~~~~~~~~~\n", + " \n", + " # BANKING METRIC: calculation\n", + " # BANOE method: annual\n", + " # B = A' + N + O - E\n", + " emissions_ann_neg = -1 * prmt.emissions_ann\n", + " emissions_ann_neg.name = 'emissions_ann_neg'\n", + " \n", + " dfs_to_concat = [allow_vint_ann,\n", + " allow_nonvint_ann,\n", + " offsets_sold_ann,\n", + " prmt.emissions_ann,\n", + " emissions_ann_neg]\n", + " bank_elements = pd.concat(dfs_to_concat, axis=1)\n", + " \n", + " bank_elements['bank_ann'] = bank_elements[['allow_vint_ann', \n", + " 'allow_nonvint_ann',\n", + " 'offsets_sold_ann', \n", + " 'emissions_ann_neg']].sum(axis=1)\n", + "\n", + " bank_elements = bank_elements.drop('emissions_ann_neg', axis=1)\n", + " \n", + " bank_elements['bank_cumul'] = bank_elements['bank_ann'].cumsum()\n", + "\n", + " # ~~~~~~~~~~~~~~\n", + " # BALANCE METRIC: bank + unsold current allowances\n", + " # note there are also allowances not in gen_acct or comp_acct:\n", + " # unsold allowances, held in auct_hold\n", + " # unsold APCR allowances, held in APCR_acct\n", + " # VRE, held in VRE_acct\n", + " \n", + " df = snaps_end_Q4_CA_QC.copy()\n", + " \n", + " mask1 = 
df.index.get_level_values('acct_name') == 'auct_hold'\n", + " mask2 = df.index.get_level_values('auct_type') == 'current' # to exclude advance\n", + " mask3 = df.index.get_level_values('status') == 'unsold'\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " df = df.loc[mask]\n", + " unsold_cur_sum = df.groupby('snap_yr')['quant'].sum()\n", + "\n", + " # note that this is the year-end value of unsold in stock\n", + " # so for 2017, it is the value in 2017Q4, after ~14 M allowances were reintro & sold in the 2017Q4 auction\n", + " # the peak value was after 2017Q1 auction, with ~141 M unsold\n", + "\n", + " # balance = bank + unsold current allowances\n", + " balance = pd.concat([bank_elements['bank_cumul'], unsold_cur_sum], axis=1).sum(axis=1)\n", + "\n", + " # note there are QC alloc held back for first true-up, which are retained in alloc_hold;\n", + " # these could arguably be included in bank, but would add only ~4 M to annual supply each year (~1% of total supply)\n", + " \n", + " # ~~~~~~~~~~~~~~\n", + " supply_ann = pd.concat([\n", + " allow_vint_ann,\n", + " allow_nonvint_ann,\n", + " offsets_sold_ann], \n", + " axis=1).sum(axis=1)\n", + " supply_ann.name = 'supply_ann'\n", + " \n", + " # ~~~~~~~~~~~~~~\n", + " # RESERVE SALES\n", + " bank_cumul_pos = bank_elements['bank_cumul']\n", + " # here, bank_cumul_pos still could have negative values; below, those are overwritten with zeros\n", + "\n", + " reserve_sales_cumul = pd.Series() # initialize\n", + " # reserve sales are negative quantities here \n", + " \n", + " for year in bank_cumul_pos.index:\n", + " if bank_cumul_pos.at[year] < 0:\n", + " # if bank is negative, put those into reserve_sales\n", + " reserve_sales_cumul.at[year] = bank_cumul_pos.at[year]\n", + " \n", + " # overwrite bank_cumul with zero for that year\n", + " bank_cumul_pos.at[year] = float(0)\n", + " else:\n", + " # if bank is positive, leave bank as is, and set reserve sales to zero\n", + " reserve_sales_cumul.at[year] = float(0)\n", + " 
\n", + " reserve_sales = reserve_sales_cumul\n", + " \n", + " # ~~~~~~~~~~~~\n", + " # set attributes \n", + " prmt.supply_ann = supply_ann\n", + " prmt.bank_cumul_pos = bank_cumul_pos\n", + " prmt.unsold_auct_hold_cur_sum = unsold_cur_sum\n", + " prmt.balance = balance\n", + " prmt.reserve_sales = reserve_sales\n", + " \n", + "# bank_source.data = dict(x=bank_elements.index, y=bank_elements['bank_cumul'])\n", + "# balance_source.data = dict(x=balance.index, y=balance.values)\n", + "# reserve_source.data = dict(x=reserve_sales.index, y=reserve_sales.values)\n", + " \n", + " # run create_export_df within supply_demand_calculations, \n", + " # so that the values of sliders etc are those used in the model run (and not what might be adjusted after run)\n", + " create_export_df()\n", + " # modifies attributes prmt.export_df & prmt.js_download_of_csv\n", + " \n", + " # no return\n", + "# end of supply_demand_calculations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_emissions_pct_sliders():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # simple\n", + " # create slider widgets as attributes of objects defined earlier\n", + " em_pct_CA_simp.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2017-2030\", continuous_update=False, \n", + " readout_format='.1%'\n", + " )\n", + "\n", + " em_pct_QC_simp.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2017-2030\", continuous_update=False, \n", + " readout_format='.1%'\n", + " )\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # advanced\n", + " # create slider widgets as attributes of objects defined earlier\n", + " em_pct_CA_adv1.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2017-2020\", continuous_update=False, \n", + " readout_format='.1%')\n", + " 
em_pct_CA_adv2.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2021-2025\", continuous_update=False, \n", + " readout_format='.1%')\n", + " em_pct_CA_adv3.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2026-2030\", continuous_update=False, \n", + " readout_format='.1%')\n", + "\n", + " em_pct_QC_adv1.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2017-2020\", continuous_update=False, \n", + " readout_format='.1%')\n", + " em_pct_QC_adv2.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2021-2025\", continuous_update=False, \n", + " readout_format='.1%')\n", + " em_pct_QC_adv3.slider = widgets.FloatSlider(value=-0.02, min=-0.07, max=0.03, step=0.005, \n", + " description=\"2026-2030\", continuous_update=False, \n", + " readout_format='.1%')\n", + " # no return\n", + "# end of create_emissions_pct_sliders" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def parse_emissions_text(text_input):\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # strip spaces from ends of text input:\n", + " text_input = text_input.strip()\n", + " \n", + " if '\\t' in text_input:\n", + " # print(\"Probably from Excel\")\n", + " text_input = text_input.split(' ')\n", + " text_input = [x.replace(',', '') for x in text_input] # if ',' in x]\n", + " text_input = [x.replace('\\t', ', ') for x in text_input] # if '\\t' in x]\n", + "\n", + "\n", + " df = pd.DataFrame([sub.split(', ') for sub in text_input])\n", + "\n", + " if df.columns[0] == 0:\n", + " df.columns = ['year', 'emissions_ann']\n", + " else:\n", + " print(\"Error\" + \"! 
df didn't come out as expected\")\n", + "\n", + " # in case year col was formatted with decimal places, remove them to get int\n", + " df['year'] = df['year'].str.split('.').str[0]\n", + "\n", + " try:\n", + " int(df.loc[0, 'year'])\n", + " # print(\"OK!\")\n", + " except:\n", + " df = df.drop(0)\n", + " # print(\"dropped row\")\n", + "\n", + " try:\n", + " df['year'] = df['year'].astype(int)\n", + " df = df.set_index('year')\n", + " \n", + " try:\n", + " df['emissions_ann'] = df['emissions_ann'].astype(float)\n", + " custom = df['emissions_ann']\n", + " except:\n", + " error_msg = \"Error\" + \"! Custom auction may have formatting problem. Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " custom = 'misformatted'\n", + " \n", + " except:\n", + " error_msg = \"Error\" + \"! Custom auction may have formatting problem. Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " custom = 'misformatted'\n", + "\n", + " try:\n", + " if custom.mean() > 1000 or custom.mean() < 50:\n", + " error_msg = \"Warning\" + \"! The emissions data might not have units of MMTCO2e.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " # no change to custom\n", + " \n", + " except:\n", + " print(\"Was not able to check whether data is realistic.\")\n", + "\n", + " elif text_input == '':\n", + " # will lead to error msg and revert to default inside fn emissions_projection\n", + " error_msg = \"Error\" + \"! Custom auction data was blank. Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " custom = 'blank'\n", + " \n", + " else: # if '\\t' not in text_input:\n", + " error_msg = \"Error\" + \"! Problem with custom data (possibly formatting problem). 
Reverting to default of -2%/year.\"\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " # no change to custom\n", + " \n", + " # override text_input value, for triggering default calculation in fn emissions_projection\n", + " custom = 'missing_slash_t'\n", + " \n", + " return(custom)\n", + "# end of parse_emissions_text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_offsets_pct_sliders():\n", + " \"\"\"\n", + " Default for CA & QC in period 2019-2020 is based on historical average calculated for 2013-2017.\n", + " (See variable prmt.offset_rate_fract_of_limit.)\n", + " \n", + " Defaults for CA in 2021-2025 & 2026-2030 are based on ARB assumption in Post-2020 analysis.\n", + " \n", + " In advanced settings, Period 1: 2019-2020; Period 2: 2021-2025; Period 3: 2026-2030\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # simple\n", + " # create slider widget as attribute of object defined earlier\n", + " off_pct_of_limit_CAQC.slider = widgets.FloatSlider(\n", + " value=prmt.offset_rate_fract_of_limit, \n", + " min=0, max=1.0, step=0.01,\n", + " description=\"2019-2030\", continuous_update=False, readout_format='.0%')\n", + " # use ARB assumption for all years 2019-2030, which fits with hist data\n", + " # this default can be based on the values in advanced settings below, \n", + " # but then also depend on emissions projection, because it will be an average over the whole period 2019-2030\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # advanced\n", + " # create slider widgets as attributes of objects defined earlier \n", + " off_pct_CA_adv1.slider = widgets.FloatSlider(\n", + " value=0.08*prmt.offset_rate_fract_of_limit, # for period 1, based on historical data for WCI; limit is 8%\n", + " min=0.0, max=0.10, step=0.005,\n", + " description=\"2019-2020\", readout_format='.1%', 
continuous_update=False)\n", + " \n", + " off_pct_CA_adv2.slider = widgets.FloatSlider(\n", + " value=0.04*prmt.offset_rate_fract_of_limit, # CA limit in period 2 is 4%\n", + " min=0.0, max=0.10, step=0.005, \n", + " description=\"2021-2025\", readout_format='.1%', continuous_update=False)\n", + " \n", + " off_pct_CA_adv3.slider = widgets.FloatSlider(\n", + " value=0.06*prmt.offset_rate_fract_of_limit, # CA limit in period 3 is 6%\n", + " min=0.0, max=0.10, step=0.005, \n", + " description=\"2026-2030\", readout_format='.1%', continuous_update=False)\n", + "\n", + " off_pct_QC_adv1.slider = widgets.FloatSlider(\n", + " value=0.08*prmt.offset_rate_fract_of_limit, # for period 1, based on historical data for WCI; limit is 8%\n", + " min=0.0, max=0.10, step=0.005, \n", + " description=\"2019-2020\", readout_format='.1%', continuous_update=False)\n", + " \n", + " off_pct_QC_adv2.slider = widgets.FloatSlider(\n", + " value=0.08*prmt.offset_rate_fract_of_limit, # QC limit in period 2 is 8%\n", + " min=0.0, max=0.10, step=0.005, \n", + " description=\"2021-2025\", readout_format='.1%', continuous_update=False)\n", + "\n", + " off_pct_QC_adv3.slider = widgets.FloatSlider(\n", + " value=0.08*prmt.offset_rate_fract_of_limit, # QC limit in period 3 is 8%\n", + " min=0.0, max=0.10, step=0.005, \n", + " description=\"2026-2030\", readout_format='.1%', continuous_update=False)\n", + " \n", + " # no return\n", + "# end of create_offsets_pct_sliders" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_figures():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # Figure 1. CA-QC emissions vs. 
instrument supplies & cap\n", + " p1 = figure(title=\"emissions and instrument supplies (annual)\",\n", + " height = 500, width = 600,\n", + " x_range=(2012.5, 2030.5),\n", + " y_range=(0, 500),\n", + " # toolbar_location=\"below\",\n", + " # toolbar_sticky=False,\n", + " )\n", + "\n", + " p1.yaxis.axis_label = \"MMTCO2e / year\"\n", + " p1.xaxis.major_label_standoff = 10\n", + " p1.xaxis.minor_tick_line_color = None\n", + " p1.yaxis.minor_tick_line_color = None\n", + " p1.outline_line_color = \"white\"\n", + " # p1.min_border_top = 10\n", + " p1.min_border_right = 15\n", + " p1.title.text_font_size = \"16px\"\n", + " \n", + " cap_CAQC = pd.concat([prmt.CA_cap, prmt.QC_cap], axis=1).sum(axis=1)\n", + " cap_CAQC_line = p1.line(cap_CAQC.index, cap_CAQC, color='lightgrey', line_width=3.5)\n", + " \n", + " supply_last_hist_yr = 2017\n", + " sup_off_CAQC_line_hist = p1.line(prmt.supply_ann.loc[:supply_last_hist_yr].index,\n", + " prmt.supply_ann.loc[:supply_last_hist_yr], \n", + " color='mediumblue', line_width=3.5) \n", + " \n", + " sup_off_CAQC_line_proj = p1.line(prmt.supply_ann.loc[supply_last_hist_yr:].index, \n", + " prmt.supply_ann.loc[supply_last_hist_yr:],\n", + " color='dodgerblue', line_width=3.5, \n", + " # line_dash='dashed'\n", + " ) \n", + " \n", + " emissions_last_hist_yr = 2016\n", + " em_CAQC_line_hist = p1.line(prmt.emissions_ann.loc[:emissions_last_hist_yr].index,\n", + " prmt.emissions_ann.loc[:emissions_last_hist_yr],\n", + " color='orangered', line_width=3.5)\n", + " \n", + " em_CAQC_line_proj = p1.line(prmt.emissions_ann.loc[emissions_last_hist_yr:].index, \n", + " prmt.emissions_ann.loc[emissions_last_hist_yr:],\n", + " color='orange', line_width=3.5, \n", + " # line_dash='dashed'\n", + " )\n", + " \n", + " legend = Legend(items=[('covered emissions (historical)', [em_CAQC_line_hist]),\n", + " ('covered emissions (projection)', [em_CAQC_line_proj]),\n", + " ('instrument supply (historical)', [sup_off_CAQC_line_hist]),\n", + " ('instrument supply 
(projection)', [sup_off_CAQC_line_proj]),\n", + " ('caps', [cap_CAQC_line]),\n", + " ],\n", + " label_text_font_size=\"14px\",\n", + " location=(0, 0),\n", + " border_line_color=None)\n", + "\n", + " p1.add_layout(legend, 'below')\n", + "\n", + " em_CAQC_fig = p1\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # Figure 2. CA-QC private bank and unsold current allowances (cumul.) + reserve sales\n", + "\n", + " # set y_max using balance_source, where balance is bank + unsold\n", + " y_max = (int(prmt.balance.max() / 100) + 1) * 100\n", + "\n", + " if prmt.reserve_sales.min() == 0:\n", + " y_min = 0\n", + " else:\n", + " # then abs(prmt.reserve_sales.min()) > 0\n", + " y_min = (int(prmt.reserve_sales.min() / 100) - 1) * 100\n", + "\n", + " p2 = figure(title='private bank and unsold allowances (cumulative)',\n", + " height = 600, width = 700,\n", + " x_range=(2012.5, 2030.5),\n", + " y_range=(y_min, y_max),\n", + " # toolbar_location=\"below\",\n", + " # toolbar_sticky=False,\n", + " )\n", + " \n", + " p2.xaxis.axis_label = \"at end of each year\"\n", + " p2.xaxis.major_label_standoff = 10\n", + " p2.xaxis.minor_tick_line_color = None\n", + " \n", + " p2.yaxis.axis_label = \"MMTCO2e\" \n", + " p2.yaxis.minor_tick_line_color = None\n", + " \n", + " p2.outline_line_color = \"white\"\n", + " # p2.min_border_top = 10\n", + " p2.min_border_right = 15\n", + " p2.title.text_font_size = \"16px\"\n", + "\n", + " unsold_vbar = p2.vbar(prmt.balance.index,\n", + " top=prmt.balance,\n", + " width=1,\n", + " color=Viridis[6][4],\n", + " line_width=1, line_color='dimgray')\n", + " \n", + " bank_vbar = p2.vbar(prmt.bank_cumul_pos.index,\n", + " top=prmt.bank_cumul_pos,\n", + " width=1,\n", + " color=Viridis[6][3],\n", + " line_width=0.5, line_color='dimgray')\n", + "\n", + " reserve_vbar = p2.vbar(prmt.reserve_sales.index,\n", + " top=prmt.reserve_sales,\n", + " width=1,\n", + " color='tomato',\n", + " line_width=0.5, line_color='dimgray')\n", + " \n", + " # add vertical 
line for divider between full historical data vs. projection (partial or full)\n", + " p2.line([emissions_last_hist_yr+0.5, emissions_last_hist_yr+0.5], \n", + " [y_min, y_max],\n", + " line_color='black', \n", + " line_width=1, \n", + " line_dash='dashed')\n", + "\n", + " legend = Legend(items=[('private bank', [bank_vbar]),\n", + " ('unsold allowances', [unsold_vbar]),\n", + " ('reserve sales', [reserve_vbar])\n", + " ],\n", + " location=(0, 0),\n", + " label_text_font_size=\"14px\",\n", + " border_line_color=None)\n", + "\n", + " p2.add_layout(legend, 'below')\n", + "\n", + " bank_CAQC_fig_bar = p2\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # Figure 1 & Figure 2\n", + "\n", + " # note: for published paper, could add another plot next to em_CAQC_fig,\n", + " # showing total unsold allowances (i.e., for 2016-2017, 141M)\n", + " # and showing which were (ultimately) reintro & which were rolled over to APCR\n", + " # see Github issue # 325\n", + "\n", + " prmt.Fig_1_2 = gridplot([em_CAQC_fig, bank_CAQC_fig_bar], ncols=2,\n", + " plot_width=450, plot_height=500,\n", + " toolbar_location=\"below\", toolbar_options={'logo': None}\n", + " )\n", + " \n", + "# # configure so that no drag tools are active\n", + "# prmt.Fig_1_2.toolbar.active_drag = None\n", + "\n", + "# # configure so that Bokeh chooses what (if any) scroll tool is active\n", + "# prmt.Fig_1_2.toolbar.active_scroll = \"auto\"\n", + "\n", + "# # configure so that a specific PolySelect tap tool is active\n", + "# prmt.Fig_1_2.toolbar.active_tap = poly_select\n", + "\n", + "# # configure so that a sequence of specific inspect tools are active\n", + "# # note: this only works for inspect tools\n", + "# prmt.Fig_1_2.toolbar.active_inspect = [hover_tool, crosshair_tool]\n", + " \n", + " # no returns; modifies object attributes\n", + "# end of create_figures" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def 
supply_demand_button_on_click(b):\n", + " \n", + " logging.info(\"***********************************************\")\n", + " logging.info(\"start of new model run, with user settings\")\n", + " logging.info(\"***********************************************\")\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " supply_demand_button.disabled = True\n", + " supply_demand_button.style.button_color = '#A9A9A9'\n", + " \n", + " prmt.error_msg_post_refresh = [] # initialize\n", + " \n", + " if auction_tabs.selected_index == 0:\n", + " # then no custom auction; default to all sell out\n", + " \n", + " # reinitialize prmt values for years_not_sold_out & fract_not_sold\n", + " # (these are used in func supply_demand_calculations to determine whether to run new auctions)\n", + " prmt.years_not_sold_out = ()\n", + " prmt.fract_not_sold = float(0)\n", + " \n", + " elif auction_tabs.selected_index == 1:\n", + " # then run custom auctions\n", + " \n", + " # set values in object prmt to be new values from user input\n", + " # this sends the user settings to the model so they'll be used in processing auctions\n", + " prmt.years_not_sold_out = years_not_sold_out_obj.wid.value\n", + " prmt.fract_not_sold = fract_not_sold_obj.wid.value\n", + " \n", + " # set local variables to be prmt versions\n", + " years_not_sold_out = prmt.years_not_sold_out\n", + " fract_not_sold = prmt.fract_not_sold\n", + " \n", + " if years_not_sold_out != () and fract_not_sold > 0:\n", + " # generate new data set auction_sales_pcts_all based on user settings & set object attribute\n", + " # calculated using function get_auction_sales_pcts_all\n", + " get_auction_sales_pcts_all()\n", + "\n", + " # process new auctions\n", + " # (includes initialize_all_accts and creation of progress bars)\n", + " all_accts_CA, all_accts_QC = process_auctions_CA_QC()\n", + " \n", + " # print(\"Finalizing results...\") # for UI\n", + " \n", + " elif years_not_sold_out == ():\n", + 
" # defaults to pre-run scenario in which all auctions sell out\n", + " error_msg = \"Warning\" + \"! No years selected for auctions with unsold allowances. Defaulted to scenario: all auctions sell out.\" # for UI\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " line_break = \" \"\n", + " prmt.error_msg_post_refresh += [line_break]\n", + " \n", + " # reset prmt values for years_not_sold_out & fract_not_sold\n", + " prmt.years_not_sold_out = ()\n", + " prmt.fract_not_sold = float(0)\n", + " \n", + " elif fract_not_sold == 0.0:\n", + " # defaults to pre-run scenario in which all auctions sell out\n", + " error_msg = \"Warning\" + \"! Auction percentage unsold was set to zero. Defaulted to scenario: all auctions sell out.\" # for UI\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " line_break = \" \"\n", + " prmt.error_msg_post_refresh += [line_break]\n", + " \n", + " # reset prmt values for years_not_sold_out & fract_not_sold\n", + " prmt.years_not_sold_out = ()\n", + " prmt.fract_not_sold = float(0)\n", + " \n", + " else:\n", + " # defaults to pre-run scenario in which all auctions sell out\n", + " error_msg = \"Warning\" + \"! Unknown error. 
Defaulted to scenario: all auctions sell out.\" # for UI\n", + " logging.info(error_msg)\n", + " prmt.error_msg_post_refresh += [error_msg]\n", + " line_break = \" \"\n", + " prmt.error_msg_post_refresh += [line_break]\n", + " \n", + " # reset prmt values for years_not_sold_out & fract_not_sold\n", + " prmt.years_not_sold_out = ()\n", + " prmt.fract_not_sold = float(0)\n", + " \n", + " supply_demand_calculations()\n", + "\n", + " # create & display new graph, using new data\n", + " create_figures()\n", + " \n", + " # clear output of this cell (button and old graph)\n", + " clear_output(wait=True)\n", + " \n", + " show(prmt.Fig_1_2) \n", + " \n", + " # enable run button and change color\n", + " supply_demand_button.style.button_color = 'PowderBlue'\n", + " supply_demand_button.disabled = False\n", + "\n", + " # enable save button and change color\n", + " save_csv_button.disabled = False\n", + " save_csv_button.style.button_color = 'PowderBlue'\n", + " \n", + " # display buttons again\n", + " display(widgets.HBox([supply_demand_button, save_csv_button]))\n", + " \n", + " if prmt.error_msg_post_refresh != []:\n", + " for element in prmt.error_msg_post_refresh:\n", + " print(element) # for UI\n", + " \n", + "# end of supply_demand_button_on_click" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_emissions_tabs():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create the widgets (as attributes of objects)\n", + " create_emissions_pct_sliders()\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~\n", + " # arrange emissions sliders (simple) & ui\n", + "\n", + " # set captions\n", + " em_simp_caption_col0 = widgets.Label(value=\"California\")\n", + " em_simp_caption_col1 = widgets.Label(value=\"Quebec\")\n", + " \n", + " # create VBox with caption & slider\n", + " em_simp_col0 = widgets.VBox([em_simp_caption_col0, em_pct_CA_simp.slider])\n", + " em_simp_col1 = 
widgets.VBox([em_simp_caption_col1, em_pct_QC_simp.slider])\n", + "\n", + " # put each column into HBox\n", + " emissions_simp_ui = widgets.HBox([em_simp_col0, em_simp_col1])\n", + " \n", + " # put whole set of captions + sliders into VBox with header\n", + " em_simp_header = widgets.Label(value=\"Choose the annual percentage change for each jurisdiction, for the whole projection (2019-2030).\")\n", + " emissions_simp_ui_w_header = widgets.VBox([em_simp_header, emissions_simp_ui])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~\n", + " # arrange emissions sliders (advanced) & ui\n", + "\n", + " # set captions\n", + " em_adv_caption_col0 = widgets.Label(value=\"California\")\n", + " em_adv_caption_col1 = widgets.Label(value=\"Quebec\")\n", + "\n", + " # create VBox with caption & slider\n", + " em_adv_col0 = widgets.VBox([em_adv_caption_col0, \n", + " em_pct_CA_adv1.slider, \n", + " em_pct_CA_adv2.slider, \n", + " em_pct_CA_adv3.slider])\n", + " em_adv_col1 = widgets.VBox([em_adv_caption_col1, \n", + " em_pct_QC_adv1.slider, \n", + " em_pct_QC_adv2.slider, \n", + " em_pct_QC_adv3.slider])\n", + "\n", + " # put each column into HBox\n", + " emissions_adv_ui = widgets.HBox([em_adv_col0, em_adv_col1])\n", + " \n", + " # put whole set of captions + sliders into VBox with header\n", + " em_adv_header = widgets.Label(value=\"Choose the annual percentage change for each jurisdiction, for each of the specified time spans.\")\n", + " emissions_adv_ui_w_header = widgets.VBox([em_adv_header, emissions_adv_ui])\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " # custom emissions input\n", + "\n", + "# emissions_text_CA = widgets.Text(\n", + "# value='',\n", + "# placeholder='Paste data here',\n", + "# description='California:',\n", + "# disabled=False\n", + "# )\n", + "# emissions_text_QC = widgets.Text(\n", + "# value='',\n", + "# placeholder='Paste data here',\n", + "# description='Quebec:',\n", + "# disabled=False\n", + "# )\n", + " em_text_input_CAQC_obj.wid = widgets.Text(\n", + " 
value='',\n", + " placeholder='Paste data here',\n", + " # description='CA + QC:',\n", + " disabled=False\n", + " )\n", + "\n", + " # caption_CA_QC_indiv = widgets.Label(value=\"Enter data for California and Quebec separately\")\n", + " em_text_input_CAQC_cap = widgets.Label(value=\"Enter annual emissions data (sum of California and Quebec)\")\n", + " \n", + " em_custom_footnote = widgets.HTML(value=em_custom_footnote_text)\n", + " \n", + " em_text_input_CAQC_ui = widgets.VBox([\n", + " # caption_CA_QC_indiv, emissions_text_CA, emissions_text_QC, \n", + " em_text_input_CAQC_cap, em_text_input_CAQC_obj.wid, \n", + " em_custom_footnote\n", + " ])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " tab = widgets.Tab()\n", + " tab.children = [emissions_simp_ui_w_header, \n", + " emissions_adv_ui_w_header, \n", + " em_text_input_CAQC_ui]\n", + " tab.set_title(0, 'simple')\n", + " tab.set_title(1, 'advanced')\n", + " tab.set_title(2, 'custom')\n", + " \n", + " emissions_tabs = tab\n", + " \n", + " return(emissions_tabs)\n", + " \n", + "# end of create_emissions_tabs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_auction_tabs():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create auction settings: simple\n", + " \n", + " # create widget, which is just a text label; the user has no options\n", + " cap = \"The default setting is that all auctions sell out.
To use this default assumption, leave this tab open.\"\n", + " auction_simp_caption_col0 = widgets.HTML(value=cap)\n", + " \n", + " # TO DO: if we want to specify other pre-run scenarios besides all sell out,\n", + " # then could use a RadioButton here to choose among the scenarios\n", + " \n", + " # put into ui\n", + " auction_simp_ui = widgets.HBox([auction_simp_caption_col0])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~\n", + " # create auction settings: advanced\n", + " year_list = list(range(2019, 2030+1))\n", + "\n", + " # create widgets for \"years not sold out\" and \"% not sold\" (as attributes of objects)\n", + " years_not_sold_out_obj.wid = widgets.SelectMultiple(\n", + " options=year_list,\n", + " value=[],\n", + " rows=len(year_list),\n", + " description='years',\n", + " disabled=False)\n", + " \n", + " fract_not_sold_obj.wid = widgets.FloatSlider(min=0.0, max=1.0, step=0.05,\n", + " description=\"% unsold\", continuous_update=False, \n", + " readout_format='.0%')\n", + " \n", + " # put widgets in boxes for ui & create final ui\n", + " years_pct_HBox = widgets.HBox([years_not_sold_out_obj.wid, fract_not_sold_obj.wid])\n", + " auction_adv_ui = widgets.VBox([years_pct_HBox])\n", + "\n", + " auction_adv_ui_header_text = \"Choose particular years in which auctions would have a portion of allowances go unsold.
To select multiple years, hold down 'ctrl' (Windows) or 'command' (Mac), or to select a range of years, hold down Shift and click the start and end of the range.
Then choose the percentage of allowances that go unsold in the auctions (both current and advance) in the years selected.\"\n", + " auction_adv_ui_header = widgets.HTML(value=auction_adv_ui_header_text)\n", + " auction_adv_ui_w_header = widgets.VBox([auction_adv_ui_header, auction_adv_ui])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~\n", + " # format auction_tabs for simple & advanced\n", + " \n", + " children = [auction_simp_ui, auction_adv_ui_w_header]\n", + " \n", + " tab = widgets.Tab()\n", + " tab.children = children\n", + " tab.set_title(0, 'simple')\n", + " tab.set_title(1, 'advanced')\n", + " \n", + " auction_tabs = tab\n", + " \n", + " return(auction_tabs)\n", + "# end of create_auction_tabs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_offsets_tabs():\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create the sliders (as attributes of objects)\n", + " create_offsets_pct_sliders()\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~\n", + " # create emissions sliders (simple) & ui\n", + " # uses slider: off_pct_of_limit_CAQC.slider\n", + " off_simp_header = widgets.Label(value=\"Choose the offset supply, as a percentage of the limit that can be used.\")\n", + " off_simp_caption = widgets.Label(value=\"California & Quebec\")\n", + " off_simp_footer = widgets.Label(value=\"For California, the limits for each time period are 8% (2018-2020), 4% (2021-2025), and 6% (2026-2030). 
For Quebec, the limit is 8% for all years.\")\n", + " off_simp_ui_w_header = widgets.VBox([off_simp_header, \n", + " off_simp_caption, \n", + " off_pct_of_limit_CAQC.slider, \n", + " off_simp_footer])\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~\n", + " # create emissions sliders (advanced) & ui\n", + "\n", + " off_adv_header = widgets.Label(value=\"Choose the offset supply as a percentage of covered emissions, for each jurisdiction, for each time span.\")\n", + "\n", + " off_adv_caption_col0 = widgets.Label(value=\"California\")\n", + " off_adv_caption_col1 = widgets.Label(value=\"Quebec\")\n", + " off_adv_footer1 = widgets.Label(value=\"For California, the limits for each time period are 8% (2018-2020), 4% (2021-2025), and 6% (2026-2030). For Quebec, the limit is 8% for all years.\")\n", + " off_adv_footer2 = widgets.Label(value=\"Warning: The sliders above may allow you to set offsets supply higher than the quantity that could be used through 2030.\")\n", + " off_adv_col0 = widgets.VBox([off_adv_caption_col0, \n", + " off_pct_CA_adv1.slider, \n", + " off_pct_CA_adv2.slider, \n", + " off_pct_CA_adv3.slider])\n", + " \n", + " off_adv_col1 = widgets.VBox([off_adv_caption_col1, \n", + " off_pct_QC_adv1.slider, \n", + " off_pct_QC_adv2.slider, \n", + " off_pct_QC_adv3.slider])\n", + "\n", + " off_adv_ui = widgets.HBox([off_adv_col0, off_adv_col1])\n", + " off_adv_ui_w_header = widgets.VBox([off_adv_header, off_adv_ui, off_adv_footer1, off_adv_footer2])\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " children = [off_simp_ui_w_header, \n", + " off_adv_ui_w_header]\n", + "\n", + " tab = widgets.Tab()\n", + " tab.children = children\n", + " tab.set_title(0, 'simple')\n", + " tab.set_title(1, 'advanced')\n", + " \n", + " offsets_tabs = tab\n", + " \n", + " return(offsets_tabs) \n", + "# end of create_offsets_tabs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_export_df():\n", + " \n", + " # 
metadata for figure_for_export\n", + " descrip_list = [f'WCI cap-and-trade model version {prmt.model_version}'] # initialize with model version number\n", + " metadata_list = [f'https://github.com/nearzero/WCI-cap-and-trade/tree/v{prmt.model_version}'] # initialize with model version number\n", + " metadata_list_of_tuples = [] # initialize\n", + "\n", + " if emissions_tabs.selected_index == 0: \n", + " # user choice: simple emissions\n", + " descrip_list += [\n", + " 'emissions annual % change CA, 2017-2030', # 'em_pct_CA_simp',\n", + " 'emissions annual % change QC, 2017-2030'# 'em_pct_QC_simp',\n", + " ]\n", + " metadata_list += [str(100*em_pct_CA_simp.slider.value)+'%', \n", + " str(100*em_pct_QC_simp.slider.value)+'%']\n", + "\n", + " elif emissions_tabs.selected_index == 1:\n", + " # user choice: advanced emissions\n", + " descrip_list += [\n", + " 'emissions annual % change CA, 2017-2020', # 'em_pct_CA_adv1.slider.value',\n", + " 'emissions annual % change CA, 2021-2025', # 'em_pct_CA_adv2.slider.value', \n", + " 'emissions annual % change CA, 2026-2030', # 'em_pct_CA_adv3.slider.value', \n", + " 'emissions annual % change QC, 2017-2020', # 'em_pct_QC_adv1.slider.value',\n", + " 'emissions annual % change QC, 2021-2025', # 'em_pct_QC_adv2.slider.value', \n", + " 'emissions annual % change QC, 2026-2030', # 'em_pct_QC_adv3.slider.value', \n", + " ]\n", + " metadata_list += [str(100*em_pct_CA_adv1.slider.value)+'%', \n", + " str(100*em_pct_CA_adv2.slider.value)+'%', \n", + " str(100*em_pct_CA_adv3.slider.value)+'%', \n", + " str(100*em_pct_QC_adv1.slider.value)+'%', \n", + " str(100*em_pct_QC_adv2.slider.value)+'%', \n", + " str(100*em_pct_QC_adv3.slider.value)+'%']\n", + "\n", + " elif emissions_tabs.selected_index == 2:\n", + " # user choice: custom emissions\n", + " descrip_list += ['custom emissions']\n", + " metadata_list += ['see values at left']\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " if auction_tabs.selected_index == 0:\n", + " # 
user choice: simple auction (all sell out)\n", + " descrip_list += ['simple auction']\n", + " metadata_list += ['all sell out']\n", + "\n", + " elif auction_tabs.selected_index == 1:\n", + " descrip_list += [\n", + " 'auctions: years that did not sell out', # 'years_not_sold_out_obj', \n", + " 'auctions: % unsold', # 'fract_not_sold_obj'\n", + " ]\n", + " metadata_list += [list(years_not_sold_out_obj.wid.value),\n", + " str(100*fract_not_sold_obj.wid.value)+'%']\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " if offsets_tabs.selected_index == 0:\n", + " # user choice: simple offsets\n", + " descrip_list += [\n", + " 'offset supply as % of limit' # 'off_pct_of_limit_CAQC'\n", + " ]\n", + " metadata_list += [str(100*off_pct_of_limit_CAQC.slider.value)+'%']\n", + "\n", + " elif offsets_tabs.selected_index == 1: \n", + " # user choice: advanced offsets\n", + " descrip_list += [\n", + " 'offset supply as % of emissions, CA, 2017-2020', # 'off_pct_CA_adv1.slider.value',\n", + " 'offset supply as % of emissions, CA, 2021-2025', # 'off_pct_CA_adv2.slider.value', \n", + " 'offset supply as % of emissions, CA, 2026-2030', # 'off_pct_CA_adv3.slider.value', \n", + " 'offset supply as % of emissions, QC, 2017-2020', # 'off_pct_QC_adv1.slider.value',\n", + " 'offset supply as % of emissions, QC, 2021-2025', # 'off_pct_QC_adv2.slider.value', \n", + " 'offset supply as % of emissions, QC, 2026-2030', # 'off_pct_QC_adv3.slider.value', \n", + " ]\n", + " metadata_list += [\n", + " str(100*off_pct_CA_adv1.slider.value)+'%', \n", + " str(100*off_pct_CA_adv2.slider.value)+'%', \n", + " str(100*off_pct_CA_adv3.slider.value)+'%', \n", + " str(100*off_pct_QC_adv1.slider.value)+'%', \n", + " str(100*off_pct_QC_adv2.slider.value)+'%', \n", + " str(100*off_pct_QC_adv3.slider.value)+'%',\n", + " ]\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # compile metadata_list_of_tuples\n", + " for element_num in range(len(metadata_list)):\n", + " metadata_list_of_tuples += 
[(descrip_list[element_num], metadata_list[element_num])]\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # add warning about excess offsets (if any)\n", + " if prmt.excess_offsets > 0:\n", + " metadata_list_of_tuples += [\n", + " (\"scenario has excess offsets at end of 2030:\", \n", + " f\"{int(round(prmt.excess_offsets, 0))} MMTCO2e\")\n", + " ]\n", + " else:\n", + " pass\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # add warning about reverting to default auctions\n", + " \n", + " \n", + " # SANDBOX:\n", + " for element_num in range(len(prmt.error_msg_post_refresh)):\n", + " if 'No years selected for auctions with unsold allowances' in prmt.error_msg_post_refresh[element_num]:\n", + " metadata_list_of_tuples += [\n", + " (\"Warning\"+ \"! No years selected for auctions with unsold allowances\", \n", + " \"Defaulted to scenario: all auctions sell out\")\n", + " ]\n", + " else:\n", + " pass\n", + " \n", + " if 'Auction percentage unsold was set to zero' in prmt.error_msg_post_refresh[element_num]:\n", + " metadata_list_of_tuples += [\n", + " (\"Warning\" + \"! 
Auction percentage unsold was set to zero\", \n", + " \"Defaulted to scenario: all auctions sell out\")\n", + " ]\n", + " else:\n", + " pass\n", + " \n", + " metadata_df = pd.DataFrame(metadata_list_of_tuples, columns=['setting descriptions', 'setting values'])\n", + " \n", + " # shift index to align with data\n", + " metadata_df.index = metadata_df.index + 2013\n", + " \n", + " emissions_export = prmt.emissions_ann.copy()\n", + " emissions_export.name = 'CA-QC covered emissions [MMTCO2e/year]'\n", + " \n", + " supply_export = prmt.supply_ann.copy()\n", + " supply_export.name = 'instrument supply additions [MMTCO2e/year]'\n", + " \n", + " bank_export = prmt.bank_cumul_pos.copy()\n", + " bank_export.name = 'banked instruments [MMTCO2e]'\n", + " \n", + " unsold_export = prmt.unsold_auct_hold_cur_sum.copy()\n", + " unsold_export.name = 'unsold allowances of current vintage or earlier [MMTCO2e]'\n", + " \n", + " reserve_export = -1 * prmt.reserve_sales.copy()\n", + " reserve_export.name = 'reserve sales [MMTCO2e]'\n", + " \n", + " export_df = pd.concat([emissions_export, \n", + " supply_export,\n", + " bank_export,\n", + " unsold_export,\n", + " reserve_export,\n", + " metadata_df], \n", + " axis=1)\n", + " export_df.index.name = 'year'\n", + " \n", + " # set attribute\n", + " prmt.export_df = export_df\n", + " \n", + " save_timestamp = time.strftime('%Y-%m-%d_%H%M%S', time.localtime())\n", + " \n", + " prmt.js_download_of_csv = \"\"\"\n", + " var csv = '%s';\n", + " \n", + " var filename = '%s';\n", + " var blob = new Blob([csv], { type: 'text/csv;charset=utf-8;' });\n", + " if (navigator.msSaveBlob) { // IE 10+\n", + " navigator.msSaveBlob(blob, filename);\n", + " } else {\n", + " var link = document.createElement(\"a\");\n", + " if (link.download !== undefined) { // feature detection\n", + " // Browsers that support HTML5 download attribute\n", + " var url = URL.createObjectURL(blob);\n", + " link.setAttribute(\"href\", url);\n", + " link.setAttribute(\"download\", 
filename);\n", + " link.style.visibility = 'hidden';\n", + " document.body.appendChild(link);\n", + " link.click();\n", + " document.body.removeChild(link);\n", + " }\n", + " }\n", + " \"\"\" % (prmt.export_df.to_csv(index=True).replace('\\n','\\\\n').replace(\"'\",\"\\'\"), \n", + " f'Near_Zero_WCI_cap_and_trade_model_results_{save_timestamp}.csv')\n", + "\n", + "# end of create_export_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "figure_html = widgets.HTML(\n", + " value=figure_explainer_text,\n", + " # placeholder='Some HTML',\n", + " # description='',\n", + ")\n", + "figure_explainer_accord = widgets.Accordion(\n", + " children=[figure_html], \n", + " layout=widgets.Layout(width=\"650px\")\n", + ")\n", + "figure_explainer_accord.set_title(0, 'About supply-demand balance and banking')\n", + "figure_explainer_accord.selected_index = None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "if __name__ == '__main__': \n", + " # show figure_explainer_accord above figure\n", + " display(figure_explainer_accord)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# create tabs for emissions, auction, offsets\n", + "\n", + "emissions_tabs = create_emissions_tabs()\n", + "auction_tabs = create_auction_tabs()\n", + "offsets_tabs = create_offsets_tabs()\n", + "\n", + "# prepare data for default graph\n", + "supply_demand_calculations()\n", + "\n", + "# create supply-demand button (but don't show it until display step below)\n", + "supply_demand_button = widgets.Button(description=\"Run supply-demand calculations\", \n", + " layout=widgets.Layout(width=\"250px\"))\n", + "\n", + "supply_demand_button.style.button_color = 'PowderBlue'\n", + "\n", + "# ~~~~~~~~~~\n", + "em_explainer_html = widgets.HTML(\n", + " 
value=em_explainer_text,\n", + " # placeholder='Some HTML',\n", + " # description='',\n", + ")\n", + "\n", + "em_explainer_accord = widgets.Accordion(\n", + " children=[em_explainer_html], \n", + " layout=widgets.Layout(width=\"650px\")\n", + ")\n", + "em_explainer_accord.set_title(0, 'About covered emissions')\n", + "em_explainer_accord.selected_index = None\n", + "\n", + "emissions_tabs_explainer = widgets.VBox([emissions_tabs, em_explainer_accord])\n", + "\n", + "emissions_title = widgets.HTML(value=\"

demand projection: covered emissions

\")\n", + "\n", + "emissions_tabs_explainer_title = widgets.VBox([emissions_title, emissions_tabs_explainer])\n", + "\n", + "# ~~~~~~~~~~\n", + "\n", + "auct_explain_html = widgets.HTML(\n", + " value=auction_explainer_text,\n", + " # placeholder='Some HTML',\n", + " # description='',\n", + ")\n", + "\n", + "auct_explain_accord = widgets.Accordion(\n", + " children=[auct_explain_html], \n", + " layout=widgets.Layout(width=\"650px\")\n", + ")\n", + "auct_explain_accord.set_title(0, 'About allowance auctions')\n", + "auct_explain_accord.selected_index = None\n", + "\n", + "auction_tabs_explainer = widgets.VBox([auction_tabs, auct_explain_accord])\n", + "\n", + "auction_title = widgets.HTML(value=\"

supply projection: allowances auctioned

\")\n", + "\n", + "auction_tabs_explainer_title = widgets.VBox([auction_title, auction_tabs_explainer])\n", + "\n", + "# ~~~~~~~~~~\n", + "offsets_explainer_html = widgets.HTML(\n", + " value=offsets_explainer_text,\n", + " # placeholder='Some HTML',\n", + " # description='',\n", + ")\n", + "\n", + "offsets_explainer_accord = widgets.Accordion(\n", + " children=[offsets_explainer_html],\n", + " layout=widgets.Layout(width=\"650px\")\n", + ")\n", + "offsets_explainer_accord.set_title(0, 'About carbon offsets')\n", + "offsets_explainer_accord.selected_index = None\n", + "\n", + "offsets_tabs_explainer = widgets.VBox([offsets_tabs, offsets_explainer_accord])\n", + "\n", + "offsets_title = widgets.HTML(value=\"

supply projection: offsets

\")\n", + "\n", + "offsets_tabs_explainer_title = widgets.VBox([offsets_title, offsets_tabs_explainer])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### create figures & display them" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create two-panel figure\n", + "create_figures()\n", + "\n", + "# when supply-demand button clicked, perform action\n", + "supply_demand_button.on_click(supply_demand_button_on_click)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### prepare for exports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create button to save csv\n", + "\n", + "# starts enabled; becomes disabled after file saved; becomes re-enabled after a new model run\n", + "save_csv_button = widgets.Button(description=\"Save results & settings (csv)\", \n", + " disabled = False,\n", + " layout=widgets.Layout(width=\"250px\"),\n", + " )\n", + "save_csv_button.style.button_color = 'PowderBlue' # '#A9A9A9'\n", + "\n", + "# ~~~~~~~~~~~~~~\n", + "# define action on click\n", + "def save_csv_on_click(b):\n", + " save_csv_button.style.button_color = '#A9A9A9'\n", + " save_csv_button.disabled = True\n", + "\n", + " display(Javascript(prmt.js_download_of_csv))\n", + "# end of save_csv_on_click\n", + "\n", + "# ~~~~~~~~~~~~~~\n", + "save_csv_button.on_click(save_csv_on_click)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "if __name__ == '__main__': \n", + " show(prmt.Fig_1_2)\n", + " \n", + " # show supply-demand button & save csv buttong\n", + " display(widgets.HBox([supply_demand_button, save_csv_button]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "if __name__ == '__main__':\n", + " # display each 
of the three tab sets (emissions, auctions, offsets)\n", + " display(emissions_tabs_explainer_title)\n", + " display(auction_tabs_explainer_title)\n", + " display(offsets_tabs_explainer_title)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### export snaps_end_all" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# if __name__ == '__main__':\n", + "# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n", + "\n", + "# if prmt.run_hindcast == True: \n", + " \n", + "# # collect the snaps\n", + "# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "# snaps_end_all_CA_QC = df\n", + " \n", + "# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0:\n", + "# # export as \"all sell out (hindcast)\"\n", + "# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_all_CA_QC all sell out (hindcast) {save_timestamp}.csv\")\n", + " \n", + "# else: \n", + "# # export as \"some unsold (hindcast)\"\n", + "# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_all_CA_QC some unsold (hindcast) {save_timestamp}.csv\")\n", + "\n", + "# else: # prmt.run_hindcast == False\n", + "# try:\n", + "# # collect the snaps, select only Q4\n", + "# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "# snaps_end_all_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy()\n", + " \n", + "# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0:\n", + "# # export as \"all sell out (not hindcast)\"\n", + "# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_all_CA_QC all sell out (not hindcast) {save_timestamp}.csv\")\n", + "# else:\n", + "# # export as \"some unsold (not hindcast)\n", + "# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_all_CA_QC some unsold (not hindcast) {save_timestamp}.csv\")\n", + "# except:\n", + "# # no results; initial run using defaults, so snaps are 
empty\n", + "# # export would just be the same as prmt.snaps_end_Q4\n", + "# pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### export snaps_end_Q4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# if __name__ == '__main__':\n", + "# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n", + "\n", + "# if prmt.run_hindcast == True: \n", + " \n", + "# # collect the snaps, select only Q4\n", + "# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "# snaps_end_Q4_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy()\n", + "\n", + "# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0:\n", + "# # export as \"all sell out (hindcast)\"\n", + "# snaps_end_Q4_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_Q4_CA_QC all sell out (hindcast) {save_timestamp}.csv\")\n", + " \n", + "# else: \n", + "# # export as \"some unsold (hindcast)\"\n", + "# snaps_end_Q4_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_Q4_CA_QC some unsold (hindcast) {save_timestamp}.csv\")\n", + "\n", + "# else: # prmt.run_hindcast == False\n", + "# try:\n", + "# # collect the snaps, select only Q4\n", + "# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False)\n", + "# snaps_end_Q4_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy()\n", + " \n", + "# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0:\n", + "# # export as \"all sell out (not hindcast)\"\n", + "# snaps_end_Q4_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_Q4_CA_QC all sell out (not hindcast) {save_timestamp}.csv\")\n", + "# else:\n", + "# # export as \"some unsold (not hindcast)\n", + "# snaps_end_Q4_CA_QC.to_csv(os.getcwd() + '/' + f\"snaps_end_Q4_CA_QC some unsold (not hindcast) {save_timestamp}.csv\")\n", + "# except:\n", + "# # no results; initial run using defaults, so snaps are empty\n", + "# # export would just be the same as 
prmt.snaps_end_Q4\n", + "# pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# if __name__ == '__main__':\n", + "# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n", + " \n", + "# avail_accum_all = pd.concat([scenario_CA.avail_accum, scenario_QC.avail_accum], axis=0, sort=False)\n", + " \n", + "# avail_accum_all.to_csv(os.getcwd() + '/' + f\"avail_accum_all all sell out {save_timestamp}.csv\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# END OF MODEL" + ] + } + ], + "metadata": { + "hide_input": false, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/data/data_input_file.xlsx b/data/data_input_file.xlsx index 1db36f2..726778a 100644 Binary files a/data/data_input_file.xlsx and b/data/data_input_file.xlsx differ