diff --git a/WCI_model.py b/WCI_model.py index da870fb..f801cb7 100644 --- a/WCI_model.py +++ b/WCI_model.py @@ -7,7 +7,7 @@ # # ## Developed by [Near Zero](http://nearzero.org) # -# ### Version 1.0 (Oct 10, 2018) +# ### Version 1.0.1 (Oct 15, 2018) # # This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec. # @@ -59,7 +59,7 @@ # from bokeh.models.tools import SaveTool from bokeh.models import Legend from bokeh.layouts import gridplot -from bokeh.palettes import viridis +from bokeh.palettes import Viridis, Blues, YlOrBr # note: Viridis is a dict; viridis is a function # # for html markup box # from bokeh.io import output_file, show @@ -104,6 +104,9 @@ class Prmt(): """ def __init__(self): + + self.model_version = '1.0.1' + self.online_settings_auction = True # will be overridden below for testing; normally set by user interface self.years_not_sold_out = () # set by user interface self.fract_not_sold = float(0) # set by user interface @@ -234,11 +237,11 @@ def load_input_files(): # main input_file try: prmt.input_file = pd.ExcelFile(prmt.input_file_raw_url_short) - logging.info("downloaded input file from short url") + # logging.info("downloaded input file from short url") # prmt.loading_msg_pre_refresh += ["Loading input file..."] # for UI except: prmt.input_file = pd.ExcelFile(prmt.blob_master + prmt.input_file_raw_url_short) - logging.info("downloaded input file from full url") + logging.info("downloaded input file using full url") # prmt.loading_msg_pre_refresh += ["Downloading input file..."] # for UI # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -2793,13 +2796,13 @@ def redesignate_unsold_advance_as_advance(all_accts, juris): sales_pct_adv_Q2 = df.at[f"{cq.date.year}Q2"] sales_pct_adv_Q3 = df.at[f"{cq.date.year}Q3"] -# # for db +# # for debugging # if use_fake_data == True: # # for 2017Q4, override actual value for adv sales in 2017Q2; set to 100% # # this 
allows redesignation of unsold from 2017Q1 in 2017Q4 # if cq.date == quarter_period('2017Q4'): # sales_pct_adv_Q2 = float(1) -# # end db +# # end debugging if sales_pct_adv_Q2 == float(1) and sales_pct_adv_Q3 == float(1): # 100% of auction sold; redesignate unsold from Q1, up to limit @@ -2914,6 +2917,9 @@ def process_auction_adv_all_accts(all_accts, juris): # iterate through all rows for available allowances; remove those sold # create df to collect sold quantities; initialize with zeros + # sort_index so that earliest vintages are drawn from first + adv_avail_1j_1q = adv_avail_1j_1q.sort_index() + adv_sold_1j_1q = adv_avail_1j_1q.copy() adv_sold_1j_1q['quant'] = float(0) @@ -3213,6 +3219,9 @@ def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q): # initialize df to collect all reintro reintro_1j_1q = prmt.standard_MI_empty.copy() + # sort_index to ensure that earliest vintages are drawn from first + reintro_eligible_1j = reintro_eligible_1j.sort_index() + for row in reintro_eligible_1j.index: if max_cur_reintro_1j_1q_remaining == 0: break @@ -3223,7 +3232,7 @@ def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q): # update accumulator for amount reintro so far in present quarter (may be more than one batch) reintro_1q_quant += reintro_one_batch_quantity - + # update un-accumulator for max_cur_reintro_1j_1q_remaining (may be more than one batch) max_cur_reintro_1j_1q_remaining += -1*reintro_one_batch_quantity @@ -3456,6 +3465,9 @@ def process_auction_cur_CA_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + reintro_avail_1q = reintro_avail_1q.sort_index() + reintro_sold_1q = reintro_avail_1q.copy() reintro_sold_1q['quant'] = float(0) @@ -3523,6 +3535,9 @@ def process_auction_cur_CA_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # 
sort_index to ensure that earliest vintages are drawn from first + new_avail_1q = new_avail_1q.sort_index() + new_sold_1q = new_avail_1q.copy() new_sold_1q['quant'] = float(0) @@ -4967,6 +4982,9 @@ def transfer_QC_alloc_trueups__from_alloc_hold(all_accts): # create df of those transferred: # copy whole df trueup_potential, zero out values, then set new values in loop + # sort_index to ensure that earliest vintages are drawn from first + trueup_potential = trueup_potential.sort_index() + trueup_transfers = trueup_potential.copy() trueup_transfers['quant'] = float(0) # note: trueup_transfers winds up with zero rows because it is not built up from appending rows @@ -5621,9 +5639,12 @@ def retire_for_EIM_outstanding(all_accts): # get quantity to be retired in cq.date.year; # initialization of variable that will be updated EIM_retirements = assign_EIM_retirements() + EIM_remaining = EIM_retirements.at[cq.date.year] # create df for adding transfers; copy of retire_potential, but with values zeroed out + # sort_index to ensure earliest vintages are drawn from first + retire_potential = retire_potential.sort_index() to_retire = retire_potential.copy() to_retire['quant'] = float(0) @@ -5646,7 +5667,7 @@ def retire_for_EIM_outstanding(all_accts): 'inst_cat': 'EIM_retire', 'date_level': cq.date} to_retire = multiindex_change(to_retire, mapping_dict) - + # concat to_retire with all_accts remainder all_accts = pd.concat([all_accts.loc[~mask], retire_potential, to_retire], sort=True) @@ -5906,6 +5927,9 @@ def process_auction_cur_QC_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + reintro_avail_1q = reintro_avail_1q.sort_index() + reintro_sold_1q = reintro_avail_1q.copy() reintro_sold_1q['quant'] = float(0) @@ -5972,6 +5996,9 @@ def process_auction_cur_QC_all_accts(all_accts): # (code adapted from avail_to_sold) # start by creating df from 
avail, with values zeroed out + # sort_index to ensure that earliest vintages are drawn from first + new_avail_1q = new_avail_1q.sort_index() + new_sold_1q = new_avail_1q.copy() new_sold_1q['quant'] = float(0) @@ -6285,17 +6312,22 @@ def create_progress_bar(wid): def assign_EIM_retirements(): - logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)") + """ + Assign quantities for EIM Outstanding Emissions retirements in 2018, 2019, and 2020. + + These are for EIM Outstanding Emissions incurred in 2017, 2018, and 2019Q1. + + As of Oct. 2018, there was no clear data on quantities to be retired for EIM Outstanding Emissions. + + Therefore values here are set to zero until more information is available. - # *possible* assumption that would be consistent with what ARB said during informal process: - # assume 5 MMTCO2e retired for EIM incurred in 2017 (processed in 2018) - # assume 5 MMTCO2e retired for EIM incurred in 2018 (processed in 2019) - # assume 5/4 MMTCO2e retired for EIM incurred in 2019Q1 (processed in 2020) - # (units MMTCO2e) + """ + logging.info(f"initialization: {inspect.currentframe().f_code.co_name} (start)") EIM_retirements_dict = {2018: 0, 2019: 0, - 2020: 0} + 2020: 0 / 4} + EIM_retirements = pd.Series(EIM_retirements_dict) EIM_retirements.name = 'EIM_retirements' EIM_retirements.index.name = 'year processed' @@ -6305,6 +6337,14 @@ def assign_EIM_retirements(): # ~~~~~~~~~~~~~~~~~~ def assign_bankruptcy_retirements(): + """ + Handling of bankruptcy retirements based on "2018 Regulation Documents (Narrow Scope)": + https://www.arb.ca.gov/regact/2018/capandtradeghg18/capandtradeghg18.htm + + Quantity for 2019 based on ARB statement in ARB, "Supporting Material for Assessment of Post-2020 Caps" (Apr 2018): + https://www.arb.ca.gov/cc/capandtrade/meetings/20180426/carb_post2020caps.pdf + "Approximately 5 million allowances to be retired in response to a recent bankruptcy" + """ logging.info(f"initialization: 
{inspect.currentframe().f_code.co_name} (start)") # bankruptcy retirements (units MMTCO2e) @@ -7679,7 +7719,6 @@ def create_figures(): border_line_color=None) p1.add_layout(legend, 'below') - # p1.add_tools(SaveTool()) em_CAQC_fig = p1 @@ -7689,7 +7728,11 @@ def create_figures(): # set y_max using balance_source, where balance is bank + unsold y_max = (int(prmt.balance.max() / 100) + 1) * 100 - y_min = (int(prmt.reserve_sales.min() / 100) - 1) * 100 + if prmt.reserve_sales.min() == 0: + y_min = 0 + else: + # then abs(prmt.reserve_sales.min()) > 0 + y_min = (int(prmt.reserve_sales.min() / 100) - 1) * 100 p2 = figure(title='private bank and unsold allowances (cumulative)', height = 600, width = 700, @@ -7699,10 +7742,13 @@ def create_figures(): # toolbar_sticky=False, ) - p2.yaxis.axis_label = "MMTCO2e" + p2.xaxis.axis_label = "at end of each year" p2.xaxis.major_label_standoff = 10 p2.xaxis.minor_tick_line_color = None + + p2.yaxis.axis_label = "MMTCO2e" p2.yaxis.minor_tick_line_color = None + p2.outline_line_color = "white" # p2.min_border_top = 10 p2.min_border_right = 15 @@ -7711,26 +7757,26 @@ def create_figures(): unsold_vbar = p2.vbar(prmt.balance.index, top=prmt.balance, width=1, - color=viridis(6)[4], # 'limegreen', - line_width=1, line_color='black') + color=Viridis[6][4], + line_width=1, line_color='dimgray') bank_vbar = p2.vbar(prmt.bank_cumul_pos.index, top=prmt.bank_cumul_pos, width=1, - color=viridis(6)[3], # 'seagreen', - line_width=1, line_color='black') + color=Viridis[6][3], + line_width=0.5, line_color='dimgray') reserve_vbar = p2.vbar(prmt.reserve_sales.index, top=prmt.reserve_sales, width=1, color='tomato', - line_width=1, line_color='black') + line_width=0.5, line_color='dimgray') # add vertical line for divider between full historical data vs. 
projection (partial or full) p2.line([emissions_last_hist_yr+0.5, emissions_last_hist_yr+0.5], [y_min, y_max], line_color='black', - # line_width=2, + line_width=1, line_dash='dashed') legend = Legend(items=[('private bank', [bank_vbar]), @@ -7742,7 +7788,6 @@ def create_figures(): border_line_color=None) p2.add_layout(legend, 'below') - # p2.add_tools(SaveTool()) bank_CAQC_fig_bar = p2 @@ -8110,8 +8155,8 @@ def create_offsets_tabs(): def create_export_df(): # metadata for figure_for_export - metadata_list = [] # initialize - descrip_list = [] # initialize + descrip_list = [f'WCI cap-and-trade model version {prmt.model_version}'] # initialize with model version number + metadata_list = [f'https://github.com/nearzero/WCI-cap-and-trade/tree/v{prmt.model_version}'] # initialize with model version number metadata_list_of_tuples = [] # initialize if emissions_tabs.selected_index == 0: @@ -8421,7 +8466,7 @@ def save_csv_on_click(b): display(Javascript(prmt.js_download_of_csv)) # end of save_csv_on_click - + # ~~~~~~~~~~~~~~ save_csv_button.on_click(save_csv_on_click) @@ -8446,7 +8491,47 @@ def save_csv_on_click(b): display(offsets_tabs_explainer_title) -# #### export snaps_end +# #### export snaps_end_all + +# In[ ]: + + +# if __name__ == '__main__': +# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime()) + +# if prmt.run_hindcast == True: + +# # collect the snaps +# df = pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False) +# snaps_end_all_CA_QC = df + +# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0: +# # export as "all sell out (hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC all sell out (hindcast) {save_timestamp}.csv") + +# else: +# # export as "some unsold (hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC some unsold (hindcast) {save_timestamp}.csv") + +# else: # prmt.run_hindcast == False +# try: +# # collect the snaps, select only Q4 +# df = 
pd.concat(scenario_CA.snaps_end + scenario_QC.snaps_end, axis=0, sort=False) +# snaps_end_all_CA_QC = df.loc[df['snap_q'].dt.quarter==4].copy() + +# if prmt.years_not_sold_out == () and prmt.fract_not_sold == 0: +# # export as "all sell out (not hindcast)" +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC all sell out (not hindcast) {save_timestamp}.csv") +# else: +# # export as "some unsold (not hindcast) +# snaps_end_all_CA_QC.to_csv(os.getcwd() + '/' + f"snaps_end_all_CA_QC some unsold (not hindcast) {save_timestamp}.csv") +# except: +# # no results; initial run using defaults, so snaps are empty +# # export would just be the same as prmt.snaps_end_Q4 +# pass + + +# #### export snaps_end_Q4 # In[ ]: @@ -8486,4 +8571,15 @@ def save_csv_on_click(b): # pass +# In[ ]: + + +# if __name__ == '__main__': +# save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime()) + +# avail_accum_all = pd.concat([scenario_CA.avail_accum, scenario_QC.avail_accum], axis=0, sort=False) + +# avail_accum_all.to_csv(os.getcwd() + '/' + f"avail_accum_all all sell out {save_timestamp}.csv") + + # # END OF MODEL diff --git a/WCI_model_interface.ipynb b/WCI_model_interface.ipynb index a984f33..43b1335 100644 --- a/WCI_model_interface.ipynb +++ b/WCI_model_interface.ipynb @@ -3,7 +3,12 @@ { "cell_type": "markdown", "metadata": { - "hide_input": false + "deletable": false, + "editable": false, + "hide_input": false, + "run_control": { + "frozen": true + } }, "source": [ "\n", @@ -12,7 +17,7 @@ "\n", "## Developed by [Near Zero](http://nearzero.org)\n", "\n", - "### Version 1.0\n", + "### Version 1.0.1\n", "\n", "This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec.\n", "\n", diff --git a/WCI_model_notebook.ipynb b/WCI_model_notebook.ipynb new file mode 100644 index 0000000..1a7c464 --- /dev/null +++ b/WCI_model_notebook.ipynb @@ -0,0 +1,9264 @@ +{ + "cells": [ + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Western Climate Initiative cap-and-trade model\n", + "\n", + "## Developed by [Near Zero](http://nearzero.org)\n", + "\n", + "### Version 1.0.1 (Oct 15, 2018)\n", + "\n", + "This model simulates the supply-demand balance of the Western Climate Initiative cap-and-trade program, jointly operated by California and Quebec.\n", + "\n", + "---\n", + "\n", + "© Copyright 2018 by [Near Zero](http://nearzero.org). This work is licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).\n", + "\n", + "Mason Inman (minman@nearzero.org) is the project manager and technical lead for the development of this model.\n", + "\n", + "The model is open source, released under the Creative Commons license above, and is written in Python, including use of the library [Pandas](https://pandas.pydata.org/). The online user interface is built using [Jupyter](https://jupyter.org/), with figures using [Bokeh](http://bokeh.pydata.org/), and hosted online through [Binder](https://mybinder.org/).\n", + "\n", + "View the [model code](https://github.com/nearzero/WCI-cap-and-trade) on Github, and download the [model documentation](https://github.com/nearzero/WCI-cap-and-trade/blob/master/documentation.docx?raw=true).\n", + "\n", + "Near Zero gratefully acknowledges support for this work from the Energy Foundation, grant number G-1804-27647. Near Zero is solely responsible for the content. The model, its results, and its documentation are for informational purposes only and do not constitute investment advice.\n", + "\n", + "**About Near Zero**: Near Zero is a non-profit environmental research organization based at the Carnegie Institution for Science on the Stanford University campus. Near Zero provides credible, impartial, and actionable assessment with the goal of cutting greenhouse gas emissions to near zero." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# IMPORT LIBRARIES" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "from pandas.tseries.offsets import *\n", + "import numpy as np\n", + "\n", + "import ipywidgets as widgets\n", + "from IPython.core.display import display # display used for widgets and for hiding code cells\n", + "from IPython.display import clear_output, Javascript # Javascript is for csv save\n", + "\n", + "import time\n", + "# from time import sleep\n", + "import datetime as dt\n", + "from datetime import datetime\n", + "\n", + "import os\n", + "import inspect # for getting name of current function\n", + "import logging\n", + "\n", + "# pd.__version__, np.__version__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import bokeh\n", + "\n", + "from bokeh.plotting import figure, show, output_notebook # save\n", + "# from bokeh.models.tools import SaveTool\n", + "from bokeh.models import Legend\n", + "from bokeh.layouts import gridplot\n", + "from bokeh.palettes import Viridis, Blues, YlOrBr # note: Viridis is a dict; viridis is a function\n", + "\n", + "# # for html markup box\n", + "# from bokeh.io import output_file, show\n", + "\n", + "# use if working offline; also might help with Binder loading\n", + "from bokeh.resources import INLINE\n", + "\n", + "output_notebook(resources=INLINE, hide_banner=True)\n", + "# hide_banner gets rid of message \"BokehJS ... 
successfully loaded\"\n", + "\n", + "from bokeh.document import Document\n", + "from bokeh.models.layouts import Column" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize logging\n", + "save_timestamp = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n", + "\n", + "# start logging\n", + "# to save logs, need to update below with the correct strings and selection for your desired directory\n", + "try:\n", + " if os.getcwd().split('/')[4] == 'cap_and_trade_active_dev':\n", + " LOG_PATH = os.getcwd() + '/logs'\n", + " logging.basicConfig(filename=f\"{LOG_PATH}/WCI_cap_trade_log_{save_timestamp}.txt\", \n", + " filemode='a', # choices: 'w' or 'a'\n", + " level=logging.INFO)\n", + " else:\n", + " # don't save log\n", + " pass\n", + "except:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class Prmt():\n", + " \"\"\"\n", + " Class to create object prmt that has parameters used throughout the model as its attributes.\n", + " \"\"\"\n", + " \n", + " def __init__(self):\n", + " \n", + " self.model_version = '1.0.1'\n", + " \n", + " self.online_settings_auction = True # will be overridden below for testing; normally set by user interface\n", + " self.years_not_sold_out = () # set by user interface\n", + " self.fract_not_sold = float(0) # set by user interface\n", + " \n", + " self.run_hindcast = False # set to true to start model run at beginning of each market (2012Q4/2013Q4)\n", + " \n", + " self.run_tests = True\n", + " self.verbose_log = True\n", + " self.test_failed_msg = 'Test failed!:' \n", + " \n", + " self.CA_post_2020_regs = 'Proposed_Regs_Sep_2018'\n", + " self.QC_post_2020_regs = 'Proposed_Regs_Sep_2018'\n", + " # regs choices are: 'Regs_Oct_2017', 'Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018'\n", + " \n", + " self.neg_cut_off = 10/1e6 # units MMTCO2e; enter number of allowances (tons CO2e) in 
numerator\n", + " # doesn't matter whether negative or positive entered here; used with -abs(neg_cut_off)\n", + " self.show_neg_msg = False # if False, fn test_for_negative_values won't print any messages \n", + " \n", + " self.CA_start_date = pd.to_datetime('2012Q4').to_period('Q') # default\n", + " self.QC_start_date = pd.to_datetime('2013Q4').to_period('Q') # default\n", + " self.CA_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " self.QC_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " self.model_end_date = pd.to_datetime('2030Q4').to_period('Q') # default\n", + " \n", + " # generate list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " self.CA_quarters = pd.date_range(start=self.CA_start_date.to_timestamp(),\n", + " end=self.CA_end_date.to_timestamp() + DateOffset(months=3), \n", + " freq='Q').to_period('Q')\n", + "\n", + " # generate list of quarters to iterate over (inclusive)\n", + " # range has DateOffset(months=3) at the end, because end of range is not included in the range generated\n", + " self.QC_quarters = pd.date_range(start=self.QC_start_date.to_timestamp(),\n", + " end=self.QC_end_date.to_timestamp() + DateOffset(months=3), \n", + " freq='Q').to_period('Q')\n", + " \n", + " self.blob_master = \"https://github.com/nearzero/WCI-cap-and-trade/blob/master\"\n", + " self.input_file_raw_url_short = \"/data/data_input_file.xlsx?raw=true\"\n", + " self.CIR_raw_url_short = \"/data/CIR_file.xlsx?raw=true\"\n", + " \n", + " self.snaps_end_Q4 = '' # value filled in by fn download_input_files\n", + " self.snaps_end_Q4_sum = '' # value filled in by fn download_input_files\n", + " \n", + " self.CA_cap_adjustment_factor = '' # value filled in by fn download_input_files\n", + " \n", + " self.NaT_proxy = pd.to_datetime('2200Q1').to_period('Q')\n", + " \n", + " self.standard_MI_names = ['acct_name', 'juris', 
'auct_type', 'inst_cat', 'vintage', 'newness', 'status', \n", + " 'date_level', 'unsold_di', 'unsold_dl', 'units']\n", + " \n", + " # create empty index; can be used for initializing all dfs\n", + " self.standard_MI_index = pd.MultiIndex(levels=[[]]*len(self.standard_MI_names),\n", + " labels=[[]]*len(self.standard_MI_names),\n", + " names=self.standard_MI_names)\n", + " \n", + " self.standard_MI_empty = pd.DataFrame(index=self.standard_MI_index, columns=['quant'])\n", + " \n", + " self.CIR_columns = ['gen_comp', 'limited_use', 'VRE_acct', 'A_I_A', 'retirement', 'APCR_acct', \n", + " 'env_integrity', 'early_action', 'subtotal']\n", + " \n", + " self.progress_bar_CA_count = 0 # initialize\n", + " self.progress_bar_QC_count = 0 # initialize\n", + " \n", + " self.offset_rate_fract_of_limit = 0.75 # see func offsets_projection for rationale\n", + " \n", + " self.use_fake_data = False # used for testing \n", + " \n", + " # ~~~~~~~~~~~~~~~~~ \n", + " \n", + " # set other variables to be blank; will be reset below using functions \n", + " self.qauct_hist = ''\n", + " self.auction_sales_pcts_all = '' \n", + " self.CA_cap = ''\n", + " self.CA_APCR_MI = ''\n", + " self.CA_advance_MI = ''\n", + " self.VRE_reserve_MI = ''\n", + "\n", + " self.CA_alloc_MI_all = ''\n", + " self.consign_hist_proj = ''\n", + " \n", + " self.QC_cap = ''\n", + " self.QC_advance_MI = ''\n", + " self.QC_APCR_MI = ''\n", + " self.QC_alloc_initial = ''\n", + " self.QC_alloc_trueups = ''\n", + " self.QC_alloc_full_proj = ''\n", + " \n", + " self.qauct_hist = ''\n", + " self.auction_sales_pcts_all = ''\n", + " self.qauct_new_avail = ''\n", + "\n", + " self.compliance_events = ''\n", + " self.VRE_retired = ''\n", + " self.CIR_historical = ''\n", + " self.CIR_offsets_q_sums = ''\n", + " \n", + " self.loading_msg_pre_refresh = []\n", + " self.error_msg_post_refresh = []\n", + " \n", + " self.input_file = ''\n", + " self.CIR_excel = ''\n", + " self.CA_cap_data = ''\n", + " \n", + " self.emissions_ann = ''\n", 
+ " self.emissions_ann_CA = ''\n", + " self.emissions_ann_QC = ''\n", + " self.supply_ann = ''\n", + " self.bank_cumul_pos = ''\n", + " self.balance = ''\n", + " self.unsold_auct_hold_cur_sum = ''\n", + " self.reserve_sales = ''\n", + " \n", + " self.Fig_1_2 = ''\n", + " self.js_download_of_csv = ''\n", + " self.export_df = ''\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "# create object prmt (instance of class Prmt), after which it can be filled with more entries below\n", + "prmt = Prmt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def load_input_files():\n", + " # download each file once from Github, set each as an attribute of object prmt\n", + "\n", + " # main input_file\n", + " try:\n", + " prmt.input_file = pd.ExcelFile(prmt.input_file_raw_url_short)\n", + " # logging.info(\"downloaded input file from short url\")\n", + " # prmt.loading_msg_pre_refresh += [\"Loading input file...\"] # for UI\n", + " except:\n", + " prmt.input_file = pd.ExcelFile(prmt.blob_master + prmt.input_file_raw_url_short)\n", + " logging.info(\"downloaded input file using full url\")\n", + " # prmt.loading_msg_pre_refresh += [\"Downloading input file...\"] # for UI\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CIR quarterly\n", + " try:\n", + " prmt.CIR_excel = pd.ExcelFile(prmt.CIR_raw_url_short)\n", + " logging.info(\"downloaded CIR file from short url\")\n", + " except:\n", + " prmt.CIR_excel = pd.ExcelFile(prmt.blob_master + prmt.CIR_raw_url_short)\n", + " logging.info(\"downloaded CIR file from full url\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def snaps_end_Q4_all_sell_initialize():\n", + " \"\"\"\n", + " Modifies format of object attribute prmt.snaps_end_Q4:\n", + " * formats date columns to be in date format\n", + " * sets MultiIndex = prmt.standard_MI_names, leaves 'snap_q' as column\n", + " \n", + " 
\"\"\"\n", + " \n", + " # read snaps_end_Q4 from input_file sheet, set new value for object attribute\n", + " prmt.snaps_end_Q4 = pd.read_excel(prmt.input_file, sheet_name='snaps end Q4 all sell out')\n", + " \n", + " # format columns as Period (quarters)\n", + " for col in ['snap_q', 'date_level', 'unsold_di', 'unsold_dl']:\n", + " if isinstance(col, pd.Period):\n", + " pass\n", + " else:\n", + " prmt.snaps_end_Q4[col] = pd.to_datetime(prmt.snaps_end_Q4[col]).dt.to_period('Q')\n", + " \n", + " # restore np.NaN (replacing the way Excel saves them)\n", + " for col in ['auct_type', 'inst_cat', 'newness', 'status']:\n", + " prmt.snaps_end_Q4[col] = prmt.snaps_end_Q4[col].replace(np.NaN, 'n/a')\n", + " \n", + " # set MultiIndex as standard_MI_names; snap_q will remain as column next to 'quant'\n", + " prmt.snaps_end_Q4 = prmt.snaps_end_Q4.set_index(prmt.standard_MI_names)\n", + " \n", + " # calculate sum (for testing); set as new value of object attribute\n", + " prmt.snaps_end_Q4_sum = prmt.snaps_end_Q4['quant'].sum()\n", + " \n", + " # no return; modifies object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run function to download files\n", + "load_input_files()\n", + "\n", + "# get snaps_end_Q4\n", + "snaps_end_Q4_all_sell_initialize()\n", + "\n", + "# set CA_cap_data\n", + "prmt.CA_cap_data = pd.read_excel(prmt.input_file, sheet_name='CA cap data')\n", + "logging.info(\"read CA_cap_data\")\n", + "\n", + "# set CA_cap_adjustment_factor\n", + "prmt.CA_cap_adjustment_factor = prmt.CA_cap_data[\n", + " prmt.CA_cap_data['name']=='CA_cap_adjustment_factor'].set_index('year')['data']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# from WCI_model_explainer_text_v2.py (2018-10-10)\n", + "\n", + "figure_explainer_text = \"
Below, the figure on the left shows covered emissions compared with the supply of compliance instruments (allowances and offsets) that enter private market participants’ accounts through auction sales or direct allocations from WCI governments.
The model tracks the private bank of allowances, defined as the number of allowances held in private accounts in excess of compliance obligations those entities face under the program in any given year. When the supply of compliance instruments entering private accounts is greater than covered emissions in a given year, the private bank increases. When the supply of compliance instruments entering private accounts is less than covered emissions, the private bank decreases.
The figure on the right shows the running total of compliance instruments banked in private accounts. In addition, the graph shows any allowances that went unsold in auctions. These allowances are held in government accounts until they are either reintroduced at a later auction or removed from the normal auction supply subject to market rules.
If the private bank is exhausted, the model simulates reserve sales to meet any remaining outstanding compliance obligations, based on the user-defined emissions projection. Starting in 2021, if the supply of allowances held in government-controlled reserve accounts is exhausted, then an unlimited quantity of instruments called “price ceiling units” will be available at a price ceiling to meet any remaining compliance obligations. The model tracks the sale of reserve allowances and price ceiling units in a single composite category.
For more information about the banking metric used here, see Near Zero's Sep. 2018 report, Tracking Banking in the Western Climate Initiative Cap-and-Trade Program.
\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "em_explainer_text = \"The WCI cap-and-trade program covers emissions from electricity suppliers, large industrial facilities, and natural gas and transportation fuel distributors.
By default, the model uses a projection in which covered emissions decrease 2% per year, starting from emissions in 2016 (the latest year with official reporting data). Users can specify higher or lower emissions scenarios using the available settings.
A 2% rate of decline follows ARB's 2017 Scoping Plan scenario for California emissions, which includes the effects of prescriptive policy measures (e.g., the Renewables Portfolio Standard for electricity), but does not incorporate effects of the cap-and-trade program.
Note that PATHWAYS, the model ARB used to generate the Scoping Plan scenario, does not directly project covered emissions in California. Instead, the PATHWAYS model tracks emissions from four economic sectors called “covered sectors,” which together constitute about ~10% more emissions than the “covered emissions” that are actually subject to the cap-and-trade program in California. For more information, see Near Zero's May 2018 report on this discrepancy. Users can define their own emission projections to explore any scenario they like, as the model makes no assumptions about future emissions aside from what the user provides.
\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "# note: noand
for this text block\n", + "em_custom_footnote_text = \"Copy and paste from data table in Excel.
Format: column for years on left, column for emissions data on right. Please copy only the data, without headers (see example).
Projection must cover each year from 2017 to 2030. (Data entered for years prior to 2017 and after 2030 will be discarded.)
Units must be million metric tons CO2e/year (MMTCO2e).
\"\n", + "\n", + "# ~~~~~~~~~~~~~~~~~~\n", + "\n", + "auction_explainer_text = \"
WCI quarterly auctions include two separate offerings: a current auction of allowances with vintage years equal to the current calendar year (as well as any earlier vintages of allowances that went unsold and are being reintroduced), and a separate advance auction featuring a limited number of allowances with a vintage year equal to three years in the future.
By default, the model assumes that all future auctions sell out. However, users can specify a custom percentage of allowances to go unsold at auction in one or more years. This percentage applies to both current and advance auctions, in each quarter of the user-specified years.
To date, most current auctions have sold out. But in 2016 and 2017, 143 million current allowances went unsold as sales collapsed over several auctions. Pursuant to market rules, most of these allowances are now being reintroduced for sale in current auctions.
If California state-owned allowances remain unsold for more than 24 months, they are removed from the normal auction supply and transferred to the market reserve accounts. Quebec's current regulations do not contain a similar stipulation. We calculate that this self-correction mechanism will remove 38 – 52 million previously unsold allowances from the normal auction supply, with the exact amount dependent on the outcomes of the next two quarterly auctions. The remaining 91 – 105 million allowance will have been reintroduced at auction.
For more information, see Near Zero's May 2018 report on this self-correction
mechanism.
In addition to submitting allowances to satisfy their compliance obligations, entities subject to the cap-and-trade program can also submit a certain number of offset credits instead. These credits represent emission reductions that take place outside of the cap-and-trade program and are credited pursuant to an approved offset protocol.
For California, the limits on offset usage are equal to a percentage of a covered entity’s compliance obligations: through 2020, the limit is 8%; from 2021 through 2025, the limit is 4%; and from 2026 through 2030, the limit is 6%. For Quebec, the limit is 8% for all years.
The model incorporates actual offset supply through Q3 2018, based on ARB’s Q3 2018 compliance instrument report for the WCI system. By default, the model assumes offset supply in any year is equivalent to three-quarters of the limit in each jurisdiction, reflecting ARB’s assumptions in the current proposed cap-and-trade regulations. Users can specify a higher or lower offset supply using the available settings.
Like allowances, offsets can also be banked for future use. Thus, we include offsets in our banking calculations. If the user-specified offset supply exceeds what can be used through 2030, given the user-specified emissions projection, then the model calculates this excess and warns the user.
For more on offsets, see Near Zero’s Mar. 2018 report, Interpreting AB 398’s Carbon Offset Limits. For more information on offset credits’ role in banking, see Near Zero's Sep. 2018 report, Tracking Banking in the Western Climate Initiative Cap-and-Trade Program.
\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# AUCTION METADATA KEY" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "hidden": true + }, + "source": [ + "**newness:**\n", + "* 'new' (fka 'newly available'; means *never* before introduced)\n", + "* 'reintro' (we use \"reintroduction\" only for state-owned that went unsold in current & are brought back again)\n", + "* 'redes' [defunct?]\n", + "\n", + "**status:**\n", + "* 'available'\n", + "* 'sold'\n", + "* 'unsold' (use for unsold stock; can be made available under right circumstances)\n", + "* 'not_avail'\n", + "\n", + "**auct_type:**\n", + "* 'current'\n", + "* 'advance'\n", + "* 'reserve'\n", + "\n", + "**juris:** (jurisdiction)\n", + "* CA, QC, ON\n", + "\n", + "**inst_cat:**\n", + "* 'CA'\n", + "* 'CA_alloc'\n", + "* 'consign' (could be elec or nat gas, IOU or POU)\n", + "* 'QC'\n", + "* 'QC\\_alloc\\_[year]'\n", + "* 'ON'\n", + "* 'QC\\_alloc\\_[year]\\_APCR' (anomalous; only used once so far)\n", + "\n", + "**date_level:**\n", + "* for auctions, it is either:\n", + " * the latest date in which allowances were auctioned\n", + " * the future date in which they're scheduled to be auctioned\n", + "* for allocations, it is the date in which they were distributed\n", + "* for retirements (i.e., VRE), it is the date in which they were retired" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# HOUSEKEEPING FUNCTIONS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def multiindex_change(df, mapping_dict): \n", + " \"\"\"\n", + " Housekeeping function: updates an index level, even when repeated values in the index.\n", + " \n", + " Reason for this:\n", + " Pandas .index.set_levels is limited in how it works, and when there are repeated values in the index level,\n", + " it runs, but with spurious results.\n", + " \n", + " Note: This function does not 
work on Series, because Pandas doesn't include Series.set_index.\n", + "\n", + " mapping_dict is dictionary with each key = level_name & each value = ''\n", + " \"\"\"\n", + " \n", + " if prmt.verbose_log == True:\n", + " try:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + " except:\n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name}\")\n", + "\n", + " # get index names before changing anything\n", + " df_index_names = df.index.names\n", + " \n", + " # create empty list (initialization) in which all changed data will be put\n", + " df_level_changed_all = []\n", + " \n", + " for level_name in mapping_dict.keys():\n", + " df_level_changed = df.index.get_level_values(level_name).map(lambda i: mapping_dict[level_name]) \n", + " df.index = df.index.droplevel(level_name)\n", + " \n", + " df_level_changed_all += [df_level_changed]\n", + "\n", + " # after making changes to all levels in dict\n", + " df = df.set_index(df_level_changed_all, append=True)\n", + " df = df.reorder_levels(df_index_names)\n", + " \n", + " return(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def convert_ser_to_df_MI(ser):\n", + " \"\"\"\n", + " Converts certain Series into MultiIndex df. Works for cap, APCR, advance, VRE.\n", + " \n", + " (Now apparently used only in initialization for CA and QC auctions.)\n", + " \n", + " Housekeeping function.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start), for {ser.name}\")\n", + " \n", + " df = pd.DataFrame(ser)\n", + " \n", + " if len(df.columns==1):\n", + " df.columns = ['quant']\n", + " else:\n", + " print(\"Error\" + \"! 
In convert_cap_to_MI, len(df.columns==1) was False.\")\n", + " \n", + " if ser.name.split('_')[0] in ['CA', 'VRE']:\n", + " juris = 'CA'\n", + " elif ser.name.split('_')[0] == 'QC':\n", + " juris = 'QC'\n", + " \n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + "\n", + " # default metadata values are for cap\n", + " df['acct_name'] = 'alloc_hold'\n", + " df['juris'] = juris # established above\n", + " df['inst_cat'] = 'cap'\n", + " # vintage already assigned above\n", + " df['auct_type'] = 'n/a'\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['date_level'] = prmt.NaT_proxy\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + "\n", + " # overwrite metadata for other sets of instruments\n", + " if 'APCR' in ser.name:\n", + " df['acct_name'] = 'APCR_acct'\n", + " df['inst_cat'] = 'APCR'\n", + " df['auct_type'] = 'reserve'\n", + " elif 'advance' in ser.name:\n", + " df['acct_name'] = 'auct_hold'\n", + " df['inst_cat'] = ser.name.split('_')[0] # same as juris\n", + " df['auct_type'] = 'advance'\n", + " df['newness'] = 'new'\n", + " df['status'] = 'not_avail'\n", + " elif 'VRE' in ser.name:\n", + " df['acct_name'] = 'VRE_acct'\n", + " # df['juris'] = 'CA'\n", + " df['inst_cat'] = 'VRE_reserve'\n", + " df['status'] = 'n/a'\n", + " else:\n", + " pass\n", + " \n", + " df = df.set_index(prmt.standard_MI_names)\n", + " return(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def convert_ser_to_df_MI_CA_alloc(ser):\n", + " \"\"\"\n", + " Converts certain Series into MultiIndex df. 
Works for CA allocations.\n", + " \n", + " Housekeeping function\n", + " \"\"\"\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start), for series {ser.name}\")\n", + " \n", + " df = pd.DataFrame(ser)\n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + " \n", + " not_consign_list = ['elec_POU_not_consign', 'nat_gas_not_consign', 'industrial_etc_alloc']\n", + "\n", + " if ser.name in not_consign_list:\n", + " df['acct_name'] = 'ann_alloc_hold'\n", + " df['auct_type'] = 'n/a'\n", + " df['juris'] = 'CA'\n", + " df = df.rename(columns={'alloc': 'quant'})\n", + "\n", + " elif ser.name in ['consign_elec_IOU', 'consign_elec_POU', 'consign_nat_gas']:\n", + " df['acct_name'] = 'limited_use'\n", + " df['auct_type'] = 'current'\n", + " df['juris'] = 'CA'\n", + " df = df.rename(columns={'alloc': 'quant'})\n", + " # TO DO: ¿also change inst_cat?\n", + " # don't change newness to new, nor status to not_avail, until consign are in auct_hold\n", + " \n", + " else: # closing 'if alloc.name in not_consign_list:'\n", + " print(\"Error\" + \"!: Series name is not in either list above.\")\n", + " \n", + " # acct_name set above\n", + " df['date_level'] = prmt.NaT_proxy\n", + " # juris set above\n", + " # vintage set above\n", + " df['inst_cat'] = ser.name\n", + " # auct_type set above\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + " \n", + " # rename column with quantities of allowances from ser.name to 'quant'\n", + " df = df.rename(columns={ser.name: 'quant'})\n", + " df_MI = df.set_index(prmt.standard_MI_names)\n", + "\n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end), for series {ser.name}\")\n", + " \n", + " return(df_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": 
[], + "source": [ + "def convert_ser_to_df_MI_QC_alloc(ser):\n", + " \"\"\"\n", + " Converts certain Series into MultiIndex df. Works for QC allocation.\n", + " \n", + " Will put the QC_alloc into gen_acct.\n", + " \n", + " Housekeeping function.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " df = pd.DataFrame(ser)\n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + " \n", + " if 'QC_alloc' in ser.name:\n", + " df['acct_name'] = 'gen_acct'\n", + " df['auct_type'] = 'n/a'\n", + " df['juris'] = 'QC'\n", + " # vintage set above\n", + " df['inst_cat'] = f'QC_alloc_{cq.date.year}'\n", + " df['date_level'] = cq.date\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + " \n", + " df = df.rename(columns={'QC_alloc': 'quant'})\n", + " \n", + " else: # closing 'if alloc.name in\n", + " print(\"Error\" + \"!: Series name is not in list above. 
Metadata not added.\")\n", + "\n", + " df = df.set_index(prmt.standard_MI_names)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# NEW FOR ONLINE (updated version)\n", + "\n", + "def quarter_period(year_quart):\n", + " \"\"\"\n", + " Converts string year_quart (i.e., '2013Q4') into datetime quarterly period.\n", + " \"\"\"\n", + " \n", + " if isinstance(year_quart, pd.Period) == True:\n", + " period = year_quart\n", + " \n", + " else:\n", + " period = pd.to_datetime(year_quart).to_period('Q')\n", + " \n", + " return(period)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# INITIALIZATION FUNCTIONS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_qauct_hist():\n", + " \"\"\"\n", + " Read historical auction data from file qauct_hist.\n", + " \n", + " Covers all auctions through 2018Q3, for CA, QC, ON\n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialize: {inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # qauct_hist is a full record of auction data, compiled from csvs using another notebook\n", + " qauct_hist = pd.read_excel(prmt.input_file, sheet_name='quarterly auct hist')\n", + " \n", + " # rename field 'auction date' to 'date_level'\n", + " qauct_hist = qauct_hist.rename(columns={'auction date': 'date_level'})\n", + " \n", + " # format 'date_level' as quarter period\n", + " qauct_hist['date_level'] = pd.to_datetime(qauct_hist['date_level']).dt.to_period('Q')\n", + " \n", + " # set object attribute\n", + " prmt.qauct_hist = qauct_hist\n", + " \n", + " # no return; func sets object attribute" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_auction_sales_pcts_all():\n", + " \"\"\"\n", + " 
Combine historical and projection, and clean up to remove overlap.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # call functions to get historical and projection data\n", + " auction_sales_pcts_historical = get_auction_sales_pcts_historical()\n", + " auction_sales_pcts_projection = get_auction_sales_pcts_projection_from_user_settings()\n", + " \n", + " # get date of last quarter of historical data, for eliminating overlap\n", + " auction_sales_last_historical_q = auction_sales_pcts_historical.index.get_level_values('date_level').max()\n", + "\n", + " # remove overlapping quarters from auction_sales_pcts_projection\n", + " df = auction_sales_pcts_projection.copy()\n", + " df = df.loc[df.index.get_level_values('date_level') > auction_sales_last_historical_q]\n", + "\n", + " # append remaining projection to historical\n", + " df = auction_sales_pcts_historical.append(df)\n", + " df = df.astype(float)\n", + " \n", + " prmt.auction_sales_pcts_all = df\n", + "\n", + " # no return; func sets object attribute" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_auction_sales_pcts_historical():\n", + " \"\"\"\n", + " Calculates historical auction sales percentages, drawing from historical record (qauct_hist).\n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create record of auction sales percentages (from qauct_hist)\n", + " df = prmt.qauct_hist.copy()\n", + " df = df[~df['inst_cat'].isin(['IOU', 'POU'])]\n", + " df = df.groupby(['market', 'auct_type', 'date_level'])[['Available', 'Sold']].sum()\n", + " df['sold_pct'] = df['Sold'] / df['Available']\n", + "\n", + " auction_sales_pcts_historical = df['sold_pct']\n", + " \n", + " return(auction_sales_pcts_historical)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "def get_auction_sales_pcts_projection_from_user_settings():\n", + " \"\"\"\n", + " Read values for auction sales percentages in projection, as specified by user interface.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " years_not_sold_out = prmt.years_not_sold_out\n", + " fract_not_sold = prmt.fract_not_sold\n", + " \n", + " if prmt.online_settings_auction == True:\n", + " proj = []\n", + " market = 'CA-QC' \n", + " \n", + " # fill in projection using user settings for years_not_sold_out & fract_not_sold\n", + " for year in range(2018, 2030+1):\n", + " for quarter in [1, 2, 3, 4]:\n", + " date_level = quarter_period(f\"{year}Q{quarter}\")\n", + " \n", + " # add current auction projections\n", + " # any quarters that overlap with historical data will be discarded when hist & proj are combined\n", + " auct_type = 'current' \n", + " if year in years_not_sold_out:\n", + " proj += [(market, auct_type, date_level, (1 - fract_not_sold))]\n", + " else:\n", + " # set to fract_not_sold to 0% (sold is 100%) for all years not in years_not_sold_out\n", + " proj += [(market, auct_type, date_level, 1.0)]\n", + " \n", + " # add advance auction projections; assume all auctions sell 100%\n", + " # any quarters that overlap with historical data will be discarded when hist & proj are combined\n", + " auct_type = 'advance'\n", + " if year in years_not_sold_out:\n", + " proj += [(market, auct_type, date_level, (1 - fract_not_sold))]\n", + " else:\n", + " # set to fract_not_sold to 0% (sold is 100%) for all years not in years_not_sold_out\n", + " proj += [(market, auct_type, date_level, 1.0)]\n", + " \n", + " proj_df = pd.DataFrame(proj, columns=['market', 'auct_type', 'date_level', 'value'])\n", + " ser = proj_df.set_index(['market', 'auct_type', 'date_level'])['value']\n", + " ser = ser.sort_index()\n", + " auction_sales_pcts_projection = ser\n", + " 
\n", + " else: \n", + " # model will use default auction sales projection of 100% sales every quarter after 2018Q3\n", + " pass\n", + " \n", + " return(auction_sales_pcts_projection)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_CA_cap():\n", + " \"\"\"\n", + " CA cap quantities from § 95841. Annual Allowance Budgets for Calendar Years 2013-2050:\n", + " * Table 6-1: 2013-2020 California GHG Allowance Budgets\n", + " * Table 6-2: 2021-2031 California GHG Allowance Budgets\n", + " * 2032-2050: equation for post-2031 cap\n", + " \"\"\"\n", + " \n", + " CA_cap_data = prmt.CA_cap_data\n", + " \n", + " CA_cap = CA_cap_data[CA_cap_data['name']=='CA_cap']\n", + " CA_cap = CA_cap.set_index('year')['data']\n", + " CA_cap = CA_cap.loc[:2030]\n", + " CA_cap.name = 'CA_cap'\n", + "\n", + " logging.info('initialize: CA_cap')\n", + " \n", + " return(CA_cap)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_CA_APCR():\n", + " \"\"\"\n", + " In current regs (Oct 2017), quantities for APCR for budget years 2013-2020 defined as percentage of budget. 
\n", + " 2013-2020 specified in regs § 95870(a):\n", + " * (1) One percent of the allowances from budget years 2013-2014;\n", + " * (2) Four percent of the allowances from budget years 2015-2017; and\n", + " * (3) Seven percent of the allowances from budget years 2018-2020.\n", + "\n", + " In current regs (Oct 2017), quantities for APCR for budget years 2021-2030 defined as total quantities.\n", + " (See § 95871(a) and Table 8-2 (as of Oct 2017))\n", + " \n", + " In proposed new regs (Sep 2018), quantities for APCR for budget years 2021-2030 defined as total quantities.\n", + " (See § 95871(a) and Table 8-2 (as of Sep 2018), which is updated from Oct 2017 version of regs)\n", + " \"\"\"\n", + " \n", + " logging.info('initialize_CA_APCR')\n", + "\n", + " CA_cap = prmt.CA_cap\n", + " CA_cap_data = prmt.CA_cap_data\n", + " CA_post_2020_regs = prmt.CA_post_2020_regs\n", + "\n", + " # for 2013-2020: get cap & reserve fraction from input file\n", + " # calculate APCR amounts\n", + " CA_APCR_fraction = CA_cap_data[CA_cap_data['name']=='CA_APCR_fraction']\n", + " CA_APCR_fraction = CA_APCR_fraction.set_index('year')['data']\n", + " CA_APCR_2013_2020 = CA_cap * CA_APCR_fraction\n", + " CA_APCR_2013_2020 = CA_APCR_2013_2020.loc[2013:2020]\n", + "\n", + " # for 2021-2031: get APCR amounts from input file\n", + " CA_APCR_2021_2031 = CA_cap_data[CA_cap_data['name']=='CA_APCR']\n", + " CA_APCR_2021_2031 = CA_APCR_2021_2031.set_index('year')['data']\n", + "\n", + " # only keep through 2030\n", + " CA_APCR_2021_2030 = CA_APCR_2021_2031.loc[2021:2030]\n", + "\n", + " CA_APCR = CA_APCR_2013_2020.append(CA_APCR_2021_2030)\n", + " CA_APCR.name = 'CA_APCR'\n", + "\n", + " # new regulations for CA:\n", + " if prmt.CA_post_2020_regs in ['Preliminary_Discussion_Draft', 'Proposed_Regs_Sep_2018']:\n", + " # move additional 2% of cap for 2026-2030 to APCR; \n", + " # do this by removing equal amount from each annual budget 2021-2030\n", + " # as stated in \"Price Concepts\" paper, this is 
2.272600 MMTCO2e per year\n", + " # and as stated in the \"Post-2020 Caps\" paper, it would be a total of ~22.7M allowances\n", + " CA_APCR_extra_sum = CA_cap.loc[2026:2030].sum() * 0.02\n", + " CA_APCR_extra_ann = CA_APCR_extra_sum / len(range(2021, 2030+1))\n", + " CA_APCR_new_2021_2030 = CA_APCR.loc[2021:2030] + CA_APCR_extra_ann\n", + " CA_APCR = CA_APCR.loc[2013:2020].append(CA_APCR_new_2021_2030)\n", + " # if other proposals for new regulations, but them here\n", + " else:\n", + " pass\n", + " \n", + " CA_APCR_MI = convert_ser_to_df_MI(CA_APCR)\n", + "\n", + " logging.info('initialize: CA_APCR')\n", + " \n", + " return(CA_APCR_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_CA_advance():\n", + " \"\"\"\n", + " Fraction of CA cap that is set aside for advance is defined in regulations.\n", + " \n", + " For 2013-2020: § 95870(b)\n", + " For 2021-2030: § 95871(b)\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info('initialize: CA_advance')\n", + " \n", + " CA_cap = prmt.CA_cap\n", + " CA_cap_data = prmt.CA_cap_data\n", + " \n", + " CA_advance_fraction = CA_cap_data[CA_cap_data['name']=='CA_advance_fraction']\n", + " CA_advance_fraction = CA_advance_fraction.set_index('year')['data']\n", + "\n", + " CA_advance = (CA_cap * CA_advance_fraction).fillna(0)\n", + " CA_advance.name ='CA_advance'\n", + "\n", + " CA_advance_MI = convert_ser_to_df_MI(CA_advance)\n", + "\n", + " return(CA_advance_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_VRE_reserve():\n", + " \"\"\"\n", + " DOCSTRINGS\n", + " \"\"\"\n", + " logging.info('initialize_VRE_reserve')\n", + " \n", + " CA_cap = prmt.CA_cap\n", + " CA_cap_data = prmt.CA_cap_data\n", + "\n", + " VRE_fraction = CA_cap_data[CA_cap_data['name']=='CA_Voluntary_Renewable_fraction']\n", + " VRE_fraction = VRE_fraction.set_index('year')['data']\n", + "\n", + 
" VRE_reserve = CA_cap * VRE_fraction\n", + "\n", + " for year in range(2021, 2030+1):\n", + " VRE_reserve.at[year] = float(0)\n", + "\n", + " VRE_reserve.name = 'VRE_reserve'\n", + "\n", + " VRE_reserve_MI = convert_ser_to_df_MI(VRE_reserve)\n", + " \n", + " return(VRE_reserve_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def read_CA_alloc_data():\n", + " \"\"\" \n", + " Reads historical allocation data, as well as cap adjustment factors\n", + " \n", + " CA allocations use cap adjustment factor from § 95891, Table 9-2\n", + " \n", + " note: Input file only includes the standard cap adjustment factors.\n", + " \n", + " If need be, can add to input file the non-standard for particular process intensive industries.\n", + " \"\"\"\n", + " logging.info('read_CA_alloc_data')\n", + " \n", + " CA_alloc_data = pd.read_excel(prmt.input_file, sheet_name='CA alloc data')\n", + " \n", + " return(CA_alloc_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_elec_alloc():\n", + " \"\"\"\n", + " 2021-2030 Electrical Distribution Utility Sector Allocation (IOU & POU):\n", + "\n", + " 2021-2030: § 95871(c)(1)\n", + " details determined by 95892(a), with allocation quantities explicitly stated in § 95892 Table 9-4\n", + " (data copied from pdf (opened in Adobe Reader) into Excel; saved in input file)\n", + " but utilities not identified in Table 9-4 as IOU or POU\n", + " so merge with 2013-2020 df, and then compute sums for whole time span 2013-2030\n", + " (also note this does not go through 2031, as cap does)\n", + " \"\"\"\n", + "\n", + " logging.info('initialize_elec_alloc')\n", + " \n", + " # create elec_alloc_2013_2020\n", + "\n", + " # read input file; \n", + " # has '-' for zero values in some cells; make those NaN, replace NaN with zero; then clean up strings\n", + " df = pd.read_excel(prmt.input_file, sheet_name='CA 
elec alloc 2013-2020', na_values='-')\n", + " df = df.fillna(0)\n", + " df = df.replace('\\xa0', '', regex=True)\n", + "\n", + " # convert all data to int, which rounds down any fractional allowances\n", + " for column in range(2013, 2020+1):\n", + " df[column] = df[column].astype(int)\n", + "\n", + " df = df.rename(columns={'Utility Name': 'Utility Name (2013-2020)'})\n", + "\n", + " # in original file, total was in a row at the end, with no label\n", + " # there was also an empty row between the total row and the rows with data by utility\n", + " # both the total row and empty row have '0' as utility name, because of fillna(0) above\n", + " # so only keep rows with 'Utility Name (2013-2020)' not 0\n", + " df = df[df['Utility Name (2013-2020)']!=0]\n", + "\n", + " elec_alloc_2013_2020 = df\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # create elec_alloc_2021_2030\n", + " df = pd.read_excel(prmt.input_file, sheet_name='CA elec alloc 2021-2030')\n", + "\n", + " # clean up data, including in column headers\n", + " # strip out line breaks (\\xa0) & spaces & commas\n", + " df = df.replace('\\xa0', '', regex=True)\n", + " df.columns = df.columns.str.strip('\\xa0')\n", + " df = df.rename(columns={'Utility': 'Utility Name'})\n", + " df = df.set_index('Utility Name')\n", + " df.columns = df.columns.astype(int)\n", + " df = df.replace(',', '', regex=True)\n", + "\n", + " # convert all column names to int\n", + " for column in range(2021, 2030+1):\n", + " df[column] = df[column].astype(int)\n", + " df = df.reset_index()\n", + "\n", + " # rename utilities according to map I created between 2013-2020 and 2021-2030 versions\n", + " CA_util_names_map = pd.read_excel(prmt.input_file, sheet_name='CA util names map')\n", + " CA_util_names_map = CA_util_names_map.replace('\\xa0', '', regex=True)\n", + " CA_util_names_map.columns = CA_util_names_map.columns.str.strip('\\xa0')\n", + "\n", + " df = pd.merge(df, CA_util_names_map, left_on='Utility Name', right_on='Utility 
Name (2021-2030)', how='outer')\n", + " df = df.drop(['Utility Name', 'notes'], axis=1)\n", + "\n", + " elec_alloc_2021_2030 = df\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: create elec_alloc_IOU & elec_alloc_POU')\n", + " \n", + " # create elec_alloc_IOU & elec_alloc_POU (units MMTCO2e)\n", + " df = pd.merge(elec_alloc_2013_2020, elec_alloc_2021_2030,\n", + " left_on='Utility Name (2013-2020)', right_on='Utility Name (2013-2020)', how='outer')\n", + "\n", + " df['Utility Type'] = df['Utility Type'].replace('COOP', 'POU')\n", + " df = df.groupby('Utility Type').sum().T\n", + " df.index = df.index.astype(int)\n", + " df = df / 1e6\n", + "\n", + " elec_alloc_IOU = df['IOU']\n", + " elec_alloc_IOU.name = 'elec_alloc_IOU'\n", + " elec_alloc_POU = df['POU']\n", + " elec_alloc_POU.name = 'elec_alloc_POU'\n", + "\n", + " # elec_alloc_IOU and elec_alloc_POU are transferred to appropriate accounts later, in consignment section\n", + " \n", + " return(elec_alloc_IOU, elec_alloc_POU)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_nat_gas_alloc(CA_alloc_data):\n", + " # historical data from annual allocation reports; stored in input file\n", + " # have to use these historical values to calculate 2011 natural gas supplier emissions\n", + " # once 2011 natural gas supplier emissions has been calculated, can use equation in regulations for projections\n", + "\n", + " CA_cap_adjustment_factor = prmt.CA_cap_adjustment_factor\n", + " \n", + " logging.info('initialize: nat_gas_alloc')\n", + " \n", + " nat_gas_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='nat_gas_alloc']\n", + " nat_gas_alloc['year'] = nat_gas_alloc['year'].astype(int)\n", + " nat_gas_alloc = nat_gas_alloc.set_index('year')['data']\n", + "\n", + " # not clear from MRR which emissions are credited to natural gas suppliers, or which emissions regs are referring to\n", + " # but can infer what 
emissions in 2011 ARB used for calculating allocations disbursed to date (2015-2017)\n", + " # emissions in 2011 = reported allocations for year X / adjustment factor for year X\n", + " # can calculate emissions in 2011 from this equation for any particular year;\n", + " # to avoid rounding errors, can calculate mean of ratios from each year\n", + " nat_gas_emissions_2011_inferred = (nat_gas_alloc / CA_cap_adjustment_factor).mean()\n", + "\n", + " # get last historical year of nat_gas_alloc\n", + " nat_gas_alloc_last_year = nat_gas_alloc.index[-1]\n", + "\n", + " # calculate allocation for all future years\n", + " for future_year in range(nat_gas_alloc_last_year, 2031+1):\n", + " nat_gas_alloc_future = nat_gas_emissions_2011_inferred * CA_cap_adjustment_factor.at[future_year]\n", + " nat_gas_alloc.at[future_year] = nat_gas_alloc_future\n", + "\n", + " # add data points with zeros to make later steps easier\n", + " nat_gas_alloc.at[2013] = float(0)\n", + " nat_gas_alloc.at[2014] = float(0)\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " nat_gas_alloc = nat_gas_alloc / 1e6\n", + "\n", + " nat_gas_alloc.name = 'nat_gas_alloc'\n", + " \n", + " return(nat_gas_alloc)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_industrial_etc_alloc(CA_alloc_data):\n", + " \n", + " CA_cap_adjustment_factor = prmt.CA_cap_adjustment_factor\n", + " \n", + " logging.info('initialize: industrial_alloc')\n", + " \n", + " industrial_alloc = CA_alloc_data.copy()[CA_alloc_data['name'].isin(['industrial_alloc', 'industrial_and_legacy_gen_alloc'])]\n", + " industrial_alloc['year'] = industrial_alloc['year'].astype(int)\n", + " industrial_alloc = industrial_alloc.set_index('year')['data']\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " industrial_alloc = industrial_alloc/1e6\n", + "\n", + " industrial_alloc.name = 'industrial_alloc'\n", 
+ "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: water_alloc')\n", + " water_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='water_alloc']\n", + " water_alloc['year'] = water_alloc['year'].astype(int)\n", + " water_alloc = water_alloc.set_index('year')\n", + " water_alloc = water_alloc['data']\n", + " water_alloc = water_alloc.astype(float)\n", + "\n", + " # § 95895(b): \"2021 and subsequent years\"\n", + " # (calculate values 2021-2031, and also combine below with values 2015-2020)\n", + "\n", + " # for post-2020, method is:\n", + " # allocation = 47,853 × cap_adjustment_factor (by year)\n", + "\n", + " # get base level (value 47,853 allowances; stored in input file)\n", + " water_alloc_post_2020_base_level = CA_alloc_data.set_index('name').at['water_alloc_post_2020_base_level', 'data']\n", + "\n", + " for year in range(2021, 2030+1):\n", + " water_alloc_year = water_alloc_post_2020_base_level * CA_cap_adjustment_factor[year]\n", + " water_alloc.at[year] = water_alloc_year\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " water_alloc = water_alloc / 1e6\n", + "\n", + " water_alloc.name = 'water_alloc'\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: university_alloc')\n", + " university_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='university_alloc']\n", + " university_alloc['year'] = university_alloc['year'].astype(int)\n", + " university_alloc = university_alloc.set_index('year')['data']\n", + "\n", + " university_alloc.name = 'university_alloc'\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " university_alloc = university_alloc / 1e6\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: legacy_gen_alloc')\n", + " legacy_gen_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='legacy_gen_alloc']\n", + " legacy_gen_alloc['year'] = 
legacy_gen_alloc['year'].astype(int)\n", + " legacy_gen_alloc = legacy_gen_alloc.set_index('year')['data']\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " legacy_gen_alloc = legacy_gen_alloc / 1e6\n", + "\n", + " legacy_gen_alloc.name = 'legacy_gen_alloc'\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: thermal_output_alloc')\n", + " \n", + " # variable allocation\n", + " thermal_output_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='thermal_output_alloc']\n", + " thermal_output_alloc['year'] = thermal_output_alloc['year'].astype(int)\n", + " thermal_output_alloc = thermal_output_alloc.set_index('year')['data']\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " thermal_output_alloc = thermal_output_alloc / 1e6\n", + "\n", + " thermal_output_alloc.name = 'thermal_output_alloc'\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: waste_to_energy_alloc')\n", + " waste_to_energy_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='waste_to_energy_alloc']\n", + " waste_to_energy_alloc['year'] = waste_to_energy_alloc['year'].astype(int)\n", + " waste_to_energy_alloc = waste_to_energy_alloc.set_index('year')['data']\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " waste_to_energy_alloc = waste_to_energy_alloc / 1e6\n", + "\n", + " waste_to_energy_alloc.name = 'waste_to_energy_alloc'\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " logging.info('initialize: LNG_supplier_alloc')\n", + " # variable allocation\n", + " LNG_supplier_alloc = CA_alloc_data.copy()[CA_alloc_data['name']=='LNG_supplier_alloc']\n", + " LNG_supplier_alloc['year'] = LNG_supplier_alloc['year'].astype(int)\n", + " LNG_supplier_alloc = LNG_supplier_alloc.set_index('year')['data']\n", + "\n", + " # convert units from allowances to MILLION allowances (MMTCO2e)\n", + " 
LNG_supplier_alloc = LNG_supplier_alloc / 1e6\n", + "\n", + " LNG_supplier_alloc.name = 'LNG_supplier_alloc'\n", + "\n", + " industrial_etc_alloc_list = [industrial_alloc, water_alloc, university_alloc, legacy_gen_alloc, \n", + " thermal_output_alloc, waste_to_energy_alloc, LNG_supplier_alloc]\n", + "\n", + " industrial_etc_alloc = pd.concat(industrial_etc_alloc_list, axis=1).sum(axis=1)\n", + " industrial_etc_alloc.name = 'industrial_etc_alloc_hist'\n", + "\n", + " # calculate what allocation would be in case for all assistance factors at 100% for 2018-2020\n", + " # assume that the resulting allocation for each year would be:\n", + "\n", + " idealized = pd.Series()\n", + " for year in range(2018, 2030+1):\n", + " cap_adj_ratio_year = CA_cap_adjustment_factor.at[year] / CA_cap_adjustment_factor.at[2017]\n", + " idealized.at[year] = industrial_etc_alloc.at[2017] * cap_adj_ratio_year\n", + "\n", + " # compare against ARB's projection from 2018-03-02 workshop presentation, slide 9\n", + " # (as extracted using WebPlotDigitizer)\n", + " ARB_proj = pd.read_excel(prmt.input_file, sheet_name='ARB allocs to 2030')\n", + " ARB_proj = ARB_proj[['year', 'industrial and other allocation (estimate) [WPD]']].set_index('year')\n", + " ARB_proj = ARB_proj[ARB_proj.columns[0]]\n", + " ARB_proj.name = 'industrial_etc_alloc_ARB_proj'\n", + "\n", + " # ARB's graph shows industrial & other allocations somewhat higher (~ +0.5 M / year) over historical period\n", + " # and somewhat lower (~ -0.5 M/year) than my projection based on 2017\n", + "\n", + " # there is uncertainty about what this projection will be, since it depends on activity\n", + " # the two are within ~1.5%, which is close enough\n", + "\n", + " # true-ups to make up for lower assistance factors in 2018-2019 will be applied retroactively, in 2020 & 2021\n", + " CA_trueups_retro = (idealized - ARB_proj).loc[2018:2019]\n", + " CA_trueups_retro.index = CA_trueups_retro.index + 2\n", + " CA_trueups_retro.name = 
'CA_trueups_retro'\n", + "\n", + " # allocation for 2020 will use 100% assistance factor\n", + " CA_additional_2020 = (idealized - ARB_proj).loc[2020:2020]\n", + " CA_additional_2020.name = 'CA_additional_2020'\n", + "\n", + " # identify last historical year of data\n", + " last_hist_year = industrial_alloc.index[-1]\n", + "\n", + " # combine the 4 pieces: historical, projection with lower assistance factors, trueups_retro, and additional\n", + " industrial_etc_alloc = pd.concat([industrial_etc_alloc.loc[:last_hist_year], \n", + " ARB_proj.loc[last_hist_year+1:], \n", + " CA_trueups_retro, \n", + " CA_additional_2020], \n", + " axis=1).sum(axis=1)\n", + " industrial_etc_alloc.name = 'industrial_etc_alloc'\n", + " \n", + " return(industrial_etc_alloc)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_consign_historical_and_projection_annual(elec_alloc_IOU, elec_alloc_POU, nat_gas_alloc):\n", + " \"\"\"\n", + " Create a projection for consignment quantities to 2030.\n", + " \n", + " For now, will have specific start year (2019).\n", + " \n", + " TO DO: Generalize to start projection after the latest historical year with data on annual consignment.\n", + " \"\"\"\n", + " \n", + " logging.info('create_consign_historical_and_projection_annual')\n", + " \n", + " # calculate annual consignment from input file\n", + " consign_ann = pd.read_excel(prmt.input_file, sheet_name='consign annual')\n", + " consign_ann = consign_ann[consign_ann['name']=='CA_consignment_annual'][['vintage', 'data']]\n", + " consign_ann = consign_ann.set_index('vintage')['data']\n", + " consign_ann.name = 'consign_ann'\n", + " \n", + " # IOUs have to consign 100% of their electricity allocation, so for them, consign = alloc\n", + " # established by § 95892(b)(1)\n", + " consign_elec_IOU = elec_alloc_IOU\n", + " consign_elec_IOU.name = 'consign_elec_IOU'\n", + " \n", + " # POUs have to consign none of their electricity 
allocation\n", + " # established by § 95892(b)(2)\n", + " \n", + " # natural gas allocation, minimum consignment portion:\n", + " # set by § 95893(b)(1)(A), Table 9-5, and Table 9-6\n", + " # values from tables above are in the input file\n", + " CA_consign_regs = pd.read_excel(prmt.input_file, sheet_name='CA consign regs')\n", + "\n", + " consign_nat_gas_min_fraction = CA_consign_regs[CA_consign_regs['name']=='CA_natural_gas_min_consign_fraction']\n", + " consign_nat_gas_min_fraction = consign_nat_gas_min_fraction.set_index('year')['data']\n", + "\n", + " consign_nat_gas_min = nat_gas_alloc * consign_nat_gas_min_fraction\n", + " consign_nat_gas_min.name = 'consign_nat_gas_min'\n", + " \n", + " # analysis of natural gas consignment:\n", + " # if we assume that natural gas allocation is proportional to MRR covered emissions for natural gas distribution...\n", + " # ... for each entity, for those entities that did receive an allocation...\n", + " # ... then we can conclude that IOUs are consigning zero or negligible (~0.1 MMTCO2e) optional amounts...\n", + " # ... 
above the minimum natural gas consignment\n", + " # then actual nat gas consignment = minimum nat gas consignment\n", + " consign_nat_gas = consign_nat_gas_min.copy()\n", + " consign_nat_gas.name = 'consign_nat_gas'\n", + " \n", + " nat_gas_not_consign = pd.concat([nat_gas_alloc, -1*consign_nat_gas], axis=1).sum(axis=1).loc[2013:2030]\n", + " nat_gas_not_consign.name = 'nat_gas_not_consign'\n", + " \n", + " # infer optional consignment amount (elec & nat gas)\n", + " consign_opt = (consign_ann - consign_elec_IOU - consign_nat_gas.fillna(0)).loc[2013:2030]\n", + " \n", + " # if IOUs are not consigning any optional amounts from their nat gas allocation, \n", + " # and we know IOUs must consign 100% of their electricity allocation,\n", + " # then we can conclude that all of the consign optional is from POUs\n", + " # and\n", + " # if we assume that POUs are like IOUs in consigning only the minimum required from natural gas allocation,\n", + " # then the optional POU consignment (in excess of the minimum for nat gas) would be from POUs' electricity allocation\n", + " # (remember, POUs don't have to consign any of their electricity allocation)\n", + " consign_elec_POU = consign_opt.copy()\n", + " consign_elec_POU.name = 'consign_elec_POU'\n", + " \n", + " # calculate the mean fraction of the electricity POU allocation that was consigned \n", + " # (any electricity POU consignment is optional; none is required)\n", + " consign_elec_POU_fraction = (consign_opt/elec_alloc_POU).mean()\n", + "\n", + " for year in range(2019, 2030+1):\n", + " consign_elec_POU_year = elec_alloc_POU.at[year] * consign_elec_POU_fraction \n", + " consign_elec_POU.at[year] = consign_elec_POU_year\n", + " \n", + " elec_POU_not_consign = pd.concat([elec_alloc_POU, -1*consign_elec_POU], axis=1).sum(axis=1).loc[2013:2030]\n", + " elec_POU_not_consign.name = 'elec_POU_not_consign'\n", + " \n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # if we want to distinguish nat gas consign IOU vs POU, \n", + " 
# could assume that nat gas allocations are proportional to nat gas distribution covered emissions from each entity\n", + " # note that not all IOUs with natural gas distribution covered emissions (according to MRR) received allocations\n", + " # but all POUs with natural gas distribution covered emissions (according to MRR) did receive allocations\n", + "\n", + " # note that we only have emissions data for 2015-2016\n", + " # so to split nat gas allocations for 2017-2018 between IOU and POU, \n", + " # would need to assume each entity receiving an allocation had the same percentage of emissions as in historical data\n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " \n", + " # consign_ann: calculate new values for projection years\n", + " for year in range(2019, 2030+1):\n", + " consign_ann.at[year] = consign_elec_IOU.at[year] + consign_elec_POU.at[year] + consign_nat_gas.at[year]\n", + " \n", + " # TO DO: create named tuple for all consigned & not consigned dfs; output this named tuple\n", + " \n", + " return(consign_ann, consign_elec_IOU, consign_nat_gas, consign_elec_POU, nat_gas_not_consign, elec_POU_not_consign)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def consign_upsample_historical_and_projection(consign_ann):\n", + " # QUARTERLY VALUES: GET HISTORICAL & CALCULATE PROJECTIONS\n", + " \n", + " qauct_new_avail = prmt.qauct_new_avail\n", + " \n", + " consign_q_hist = qauct_new_avail.loc[qauct_new_avail.index.get_level_values('inst_cat')=='consign']\n", + " \n", + " last_cur_hist_q = consign_q_hist.index.get_level_values('date_level').max()\n", + "\n", + " # create template row for adding additional rows to consign_hist_proj\n", + " consign_1q_template = consign_q_hist.loc[consign_q_hist.index[-1:]]\n", + " consign_1q_template.at[consign_1q_template.index, 'quant'] = float(0)\n", + "\n", + " if last_cur_hist_q.quarter < 4:\n", + " # fill in missing quarters for last historical year\n", + 
" \n", + " # get annual total consigned\n", + " consign_ann_1y = consign_ann.at[last_cur_hist_q.year]\n", + " \n", + " # calculate total already newly available that year\n", + " df = consign_q_hist.loc[consign_q_hist.index.get_level_values('date_level').year==last_cur_hist_q.year]\n", + " consign_1y_to_date = df['quant'].sum()\n", + " \n", + " # calculate remaining to consign\n", + " consign_remaining = consign_ann_1y - consign_1y_to_date\n", + "\n", + " # number of remaining auctions:\n", + " num_remaining_auct = 4 - last_cur_hist_q.quarter\n", + " \n", + " # average consignment in remaining auctions\n", + " avg_consign = consign_remaining / num_remaining_auct\n", + " \n", + " consign_hist_proj = consign_q_hist.copy()\n", + " \n", + " for proj_q in range(last_cur_hist_q.quarter+1, 4+1):\n", + " proj_date = quarter_period(f\"{last_cur_hist_q.year}Q{proj_q}\")\n", + " \n", + " # create new row; update date_level and quantity\n", + " consign_new_row = consign_1q_template.copy()\n", + " mapping_dict = {'date_level': proj_date}\n", + " consign_new_row = multiindex_change(consign_new_row, mapping_dict)\n", + " consign_new_row.at[consign_new_row.index, 'quant'] = avg_consign\n", + " \n", + " # set new value in consign_hist_proj\n", + " consign_hist_proj = consign_hist_proj.append(consign_new_row)\n", + " \n", + " # for years after last historical data year (last_cur_hist_q.year) \n", + " for year in range(last_cur_hist_q.year+1, 2030+1):\n", + " avg_consign = consign_ann.loc[year] / 4\n", + " \n", + " for quarter in [1, 2, 3, 4]:\n", + " proj_date = quarter_period(f\"{year}Q{quarter}\")\n", + "\n", + " # create new row; update date_level and quantity\n", + " consign_new_row = consign_1q_template.copy()\n", + " mapping_dict = {'date_level': proj_date, \n", + " 'vintage': year}\n", + " consign_new_row = multiindex_change(consign_new_row, mapping_dict)\n", + " \n", + " consign_new_row.at[consign_new_row.index, 'quant'] = avg_consign\n", + " \n", + " # set new value in 
consign_hist_proj\n", + " consign_hist_proj = consign_hist_proj.append(consign_new_row)\n", + " \n", + " prmt.consign_hist_proj = consign_hist_proj\n", + " \n", + " # no return; func sets object attribute prmt.consign_hist_proj" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_QC_allocation_data():\n", + " \"\"\"\n", + " From input file, import full data set on QC allocations.\n", + " \n", + " Separated by emissions year, date of allocation, and type of allocation (initial, true-up #1, etc.).\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialize: {inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " # get more detailed allocation data (for hindcast)\n", + " QC_alloc_hist = pd.read_excel(prmt.input_file, sheet_name='QC alloc data full')\n", + " QC_alloc_hist['allocation quarter'] = pd.to_datetime(QC_alloc_hist['allocation date']).dt.to_period('Q')\n", + " \n", + " # convert units to MMTCO2e\n", + " QC_alloc_hist['quant'] = QC_alloc_hist['quantity to date (tons CO2e)']/1e6\n", + " \n", + " QC_alloc_hist = QC_alloc_hist.drop(['before or after quarterly auction', \n", + " 'allocation date',\n", + " 'quantity to date (tons CO2e)',\n", + " 'quantity on date for true-ups (tons CO2e)', \n", + " 'notes'], \n", + " axis=1)\n", + " \n", + " QC_alloc_hist = QC_alloc_hist.set_index(['allocation for emissions year',\n", + " 'allocation type',\n", + " 'allocation quarter'])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # isolate initial allocations\n", + " QC_alloc_initial = QC_alloc_hist.loc[QC_alloc_hist.index.get_level_values('allocation type')=='initial']\n", + " QC_alloc_initial.index = QC_alloc_initial.index.droplevel('allocation type')\n", + " \n", + " # make projection for initial allocations\n", + " # take most recent historical data, assume future initial allocations will scale down with cap\n", + " last_year = QC_alloc_initial.index.get_level_values('allocation for 
emissions year').max()\n", + "\n", + " df = QC_alloc_initial.loc[QC_alloc_initial.index.get_level_values('allocation quarter').year==last_year]\n", + " QC_alloc_initial_last_year = df['quant'].sum()\n", + " \n", + " # initialize and clear values\n", + " QC_alloc_initial_proj = QC_alloc_initial.copy() # initialize\n", + " QC_alloc_initial_proj['quant'] = float(0)\n", + " QC_alloc_initial_proj = QC_alloc_initial_proj.loc[QC_alloc_initial_proj['quant']>0]\n", + " \n", + " # use cap_adjustment_factor\n", + " # assume the initial allocations will always be in Q1\n", + " for year in range(last_year+1, 2030+1):\n", + " cap_adjustment_ratio = prmt.QC_cap.at[year]/prmt.QC_cap.at[last_year]\n", + " QC_alloc_initial_proj_quant = QC_alloc_initial_last_year * cap_adjustment_ratio\n", + " QC_alloc_initial_proj.at[(year, quarter_period(f'{year}Q1')), 'quant'] = QC_alloc_initial_proj_quant\n", + " \n", + " QC_alloc_initial = QC_alloc_initial.append(QC_alloc_initial_proj)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~\n", + " # calculate true-ups for each distribution from cumulative data reported\n", + " # (use diff to calculate difference between a given data point and previous one)\n", + " QC_alloc_trueups = QC_alloc_hist.groupby('allocation for emissions year').diff().dropna()\n", + " QC_alloc_trueups.index = QC_alloc_trueups.index.droplevel('allocation type')\n", + " \n", + " # make projection for true-up allocations, following after latest year with a first true-up (in Q3)\n", + " Q3_trueup_mask = QC_alloc_trueups.index.get_level_values('allocation quarter').quarter==3\n", + " Q3_trueups = QC_alloc_trueups.copy().loc[Q3_trueup_mask]\n", + " not_Q3_trueups = QC_alloc_trueups.loc[~Q3_trueup_mask]\n", + " \n", + " last_year = Q3_trueups.index.get_level_values('allocation for emissions year').max()\n", + " \n", + " # first true-ups are 25% of total estimated allocation, whereas initial alloc are 75% of total est. 
alloc\n", + " # therefore first true-ups are one-third (25%/75%) of the initial true-up\n", + " # in projection, do not model any further true-ups after first true-ups (assume no revisions of allocation)\n", + " for year in range(last_year+1, 2030+1):\n", + " init_last_year_plus1 = QC_alloc_initial.at[(year, f'{year}Q1'), 'quant']\n", + " first_trueup_quant = init_last_year_plus1 / 3\n", + " Q3_trueups.at[(year, quarter_period(f'{year+1}Q3')), 'quant'] = first_trueup_quant\n", + " \n", + " Q3_trueups = Q3_trueups.dropna()\n", + " \n", + " # recombine:\n", + " QC_alloc_trueups = pd.concat([Q3_trueups, not_Q3_trueups])\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~\n", + " # also calculate full (est.) allocation for projection years\n", + " # used for setting aside this quantity from cap, for initial alloc and first true-up\n", + " \n", + " # calculate full allocation (100%) from the initial allocation (75% of full quantity)\n", + " QC_alloc_full_proj = QC_alloc_initial_proj * 100/75\n", + " \n", + " # set object attributes\n", + " prmt.QC_alloc_initial = QC_alloc_initial\n", + " prmt.QC_alloc_trueups = QC_alloc_trueups\n", + " prmt.QC_alloc_full_proj = QC_alloc_full_proj\n", + " \n", + " # no return; func sets object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_compliance_events():\n", + " \"\"\"\n", + " From compliance reports, create record of compliance events (quantities surrendered at specific times).\n", + " \n", + " Note that quantities surrendered are *not* the same as the covered emissions that have related obligations.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\") \n", + " \n", + " # get record of retirements (by vintage) from compliance reports\n", + " df = pd.read_excel(prmt.input_file, sheet_name='annual compliance reports')\n", + " df = df.set_index('year of compliance event')\n", + "\n", + " df = 
df.drop('for 2013-2014 full compliance period')\n", + " df = df.drop(['CA checksum', 'QC checksum'], axis=1)\n", + " df = df.drop(['CA entities retired total (all instruments)', \n", + " 'QC entities retired total (all instruments)'], axis=1)\n", + " df = df.dropna(how='all')\n", + "\n", + " # convert compliance report values into compliance events (transfers that occur each Nov)\n", + " # sum allowances by vintage, combining surrenders by CA & QC entities\n", + " df = df.copy()\n", + " df.columns = df.columns.str.replace('CA entities retired ', '')\n", + " df.columns = df.columns.str.replace('QC entities retired ', '')\n", + " df.columns = df.columns.str.replace('allowance vintage ', '')\n", + " df.columns.name = 'vintage or type'\n", + "\n", + " df = df.stack()\n", + " df = pd.DataFrame(df, columns=['quant'])\n", + " df = df.loc[df['quant'] > 0]\n", + " df = df.groupby(['year of compliance event', 'vintage or type']).sum().reset_index()\n", + " df['compliance_date'] = pd.to_datetime(df['year of compliance event'].astype(str)+'-11-01').dt.to_period('Q')\n", + "\n", + " # rename 'Early Reduction credits'\n", + " df['vintage or type'] = df['vintage or type'].str.replace('Early Reduction credits', 'early_action')\n", + "\n", + " df = df[['compliance_date', 'vintage or type', 'quant']].set_index(['compliance_date', 'vintage or type'])\n", + "\n", + " prmt.compliance_events = df\n", + " \n", + " # no return; func sets object attribute" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "heading_collapsed": true + }, + "source": [ + "# TEST FUNCTIONS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_cols_and_indexes_before_transfer(to_acct_MI):\n", + " \"\"\"\n", + " {{{INSERT DOCSTRINGS}}}\n", + " \"\"\"\n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # check that to_acct_MI has only 1 
column & that it has MultiIndex\n", + " if len(to_acct_MI.columns)==1 and isinstance(to_acct_MI.index, pd.MultiIndex):\n", + " pass # test passed\n", + " \n", + " elif len(to_acct_MI.columns)>1:\n", + " print(f\"{prmt.test_failed_msg} df to_acct_MI has more than 1 column. Here's to_acct_MI:\")\n", + " print(to_acct_MI)\n", + " \n", + " elif len(to_acct_MI.columns)==0:\n", + " print(f\"{prmt.test_failed_msg} df to_acct_MI has no columns. Here's to_acct_MI:\")\n", + " print(to_acct_MI)\n", + " \n", + " else: # closing \"if len(to_acct_MI.columns)==1...\"\n", + " print(f\"{prmt.test_failed_msg} Something else going on with df to_acct_MI columns and/or index. Here's to_acct_MI:\")\n", + " print(to_acct_MI)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_for_duplicated_indices(df, parent_fn):\n", + " \"\"\"\n", + " Test to check a dataframe (df) for duplicated indices, and if any, to show them (isolated and in context).\n", + " \"\"\"\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + "\n", + " dups = df.loc[df.index.duplicated(keep=False)]\n", + " \n", + " if dups.empty == False:\n", + " print(f\"{prmt.test_failed_msg} df.index.duplicated when running {parent_fn}; here are duplicated indices:\")\n", + " print(dups)\n", + " \n", + " # get the acct_names that show up in duplicates\n", + " dup_acct_names = dups.index.get_level_values('acct_name').unique().tolist()\n", + " \n", + "# for dup_acct_name in dup_acct_names:\n", + "# print(f\"During {parent_fn}, dups in acct_name {dup_acct_name}; here's the full account:\")\n", + "# print(df.loc[df.index.get_level_values('acct_name')==dup_acct_name])\n", + " elif df[df.index.duplicated(keep=False)].empty == True:\n", + " # test passed: there were no duplicated indices\n", + " pass\n", + " else:\n", + " print(f\"{prmt.test_failed_msg} During {parent_fn}, was meant to 
be check for duplicated indices.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_if_value_is_float_or_np_float64(test_input):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " if isinstance(test_input, float)==False and isinstance(test_input, np.float64)==False:\n", + " print(f\"{prmt.test_failed_msg} Was supposed to be a float or np.float64. Instead was type: %s\" % type(test_input))\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_for_negative_values(df, parent_fn):\n", + " \"\"\"\n", + " Checks whether specified df (usually all_accts) has any rows with negative values.\n", + " \n", + " If so, it finds values of acct_name for those rows. \n", + " \n", + " Fn can show the full account for those with negative rows (need to make sure that code is uncommented).\n", + " \"\"\"\n", + " \n", + " if prmt.show_neg_msg == True:\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " neg_cut_off = prmt.neg_cut_off\n", + "\n", + " non_deficits = df.loc[df.index.get_level_values('status')!='deficit']\n", + " neg_values = non_deficits.loc[non_deficits['quant']<-abs(neg_cut_off)]\n", + "\n", + " if len(neg_values) > 0:\n", + " print()\n", + " print(\"Warning\" + f\"! 
During {parent_fn}, negative values in all_accts (other than deficits).\")\n", + " print(neg_values)\n", + " print()\n", + "\n", + " # get acct_names with negative values\n", + " neg_acct_names = neg_values.index.get_level_values('acct_name').unique().tolist()\n", + "\n", + " for neg_acct_name in neg_acct_names:\n", + " print(f\"There were negative values in acct_name {neg_acct_name}; here's the full account:\")\n", + " print(df.loc[df.index.get_level_values('acct_name')==neg_acct_name])\n", + "\n", + " else:\n", + " pass\n", + " \n", + " else: # prmt.show_neg_msg == False\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_during_transfer(all_accts, all_accts_sum_init, remove_name):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # check for conservation of instruments within all_accts\n", + " all_accts_end_sum = all_accts['quant'].sum()\n", + " diff = all_accts_end_sum - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg}: In {inspect.currentframe().f_code.co_name}, instruments were not conserved. 
Diff: %s\" % diff)\n", + " print(f\"Was for df named {remove_name}\")\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_simple(df, df_sum_init, parent_fn):\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name}\")\n", + " \n", + " df_sum_final = df['quant'].sum()\n", + " diff = df_sum_final - df_sum_init\n", + " \n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in {parent_fn}\")\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "hidden": true + }, + "outputs": [], + "source": [ + "def test_conservation_against_full_budget(all_accts, juris, parent_fn):\n", + " \"\"\"additional conservation check, using total allowance budget\"\"\"\n", + "\n", + " CA_cap = prmt.CA_cap\n", + " QC_cap = prmt.QC_cap\n", + " \n", + " if juris == 'CA':\n", + " if cq.date <= quarter_period('2017Q4'):\n", + " budget = CA_cap.loc[2013:2020].sum()\n", + " \n", + " # additional allowances vintage 2021-2030 assumed to have been added at start of 2018Q1 (Jan 1)\n", + " # (all we know for sure is they were first included in 2017Q4 CIR)\n", + " # budget will be the same through 2030Q4, as far as we know at this point\n", + " # but in some future year, perhaps 2028, post-2030 allowances would likely be added to the system\n", + " elif cq.date >= quarter_period('2018Q1') and cq.date <= quarter_period('2030Q4'):\n", + " budget = CA_cap.loc[2013:2030].sum()\n", + " else:\n", + " print(\"Need to fill in CA budget after 2030, if those in Oct 2017 regs are retained.\")\n", + " \n", + " elif juris == 'QC':\n", + " if cq.date == quarter_period('2013Q4'):\n", + " budget = QC_cap.loc[2013:2020].sum()\n", + "\n", + " elif cq.date >= quarter_period('2014Q1') and cq.date <= quarter_period('2017Q4'):\n", + " # add Early 
Action allowances to budget\n", + " budget = QC_cap.loc[2013:2020].sum() + 2.040026 # units: MMTCO2e\n", + "\n", + " # additional allowances vintage 2021-2030 assumed to have been added at start of 2018Q1 (Jan 1)\n", + " # (all we know for sure is they were first included in 2017Q4 CIR)\n", + " # budget will be the same through 2030Q4, as far as we know at this point\n", + " # but in some future year, perhaps 2028, post-2030 allowances would likely be added to the system\n", + " elif cq.date >= quarter_period('2018Q1') and cq.date <= quarter_period('2030Q4'):\n", + " budget = QC_cap.loc[2013:2030].sum() + 2.040026 # units: MMTCO2e\n", + "\n", + " else:\n", + " print(\"Error\" + \"! QC budget not defined after 2030.\")\n", + "\n", + " elif juris == 'ON':\n", + " # only represent net flow from ON into CA-QC; \n", + " # these may be any juris, but for purposes of tracking they are recorded as juris 'ON'\n", + " \n", + " if cq.date < quarter_period('2018Q2'):\n", + " budget = 0\n", + " \n", + " # as noted in 2018Q2 CIR: \n", + " # \"As of [July 3, 2018], there are 13,186,967 more compliance instruments held in California and Québec \n", + " # accounts than the total number of compliance instruments issued by those two jurisdictions alone.\"\n", + " \n", + " elif cq.date >= quarter_period('2018Q2') and cq.date <= quarter_period('2017Q4'):\n", + " # add Early Action allowances to budget\n", + " budget = 13.186967 # units: MMTCO2e\n", + " \n", + " else:\n", + " print(\"Error\" + f\"! 
Some other juris not in list; juris is: {juris}\")\n", + " \n", + " diff = all_accts['quant'].sum() - budget\n", + "\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in {parent_fn}.\")\n", + " print(f\"(Final value minus full budget ({budget} M) was: {diff} M.)\")\n", + " # print(f\"Was for auct_type: {auct_type}\")\n", + " else:\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def test_snaps_end_Q4_sum():\n", + " \"\"\"Check for modifications to snaps_end_Q4 by checking sum.\"\"\"\n", + " \n", + " if prmt.snaps_end_Q4['quant'].sum() == prmt.snaps_end_Q4_sum:\n", + " # no change to sum of prmt.snaps_end_Q4; equal to original sum calculated in model initialization\n", + " pass\n", + " else:\n", + " print(f\"{prmt.test_failed_msg} snaps_end_Q4 sum has changed from initial value.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MAIN FUNCTIONS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_annual_budgets_in_alloc_hold(all_accts, ser):\n", + " \"\"\"\n", + " Creates allowances for each annual budget, in the Allocation Holding account (alloc_hold).\n", + " \n", + " Does this for each juris.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " df = pd.DataFrame(ser)\n", + " \n", + " if len(df.columns==1):\n", + " df.columns = ['quant']\n", + " else:\n", + " print(\"Error\" + \"! 
In convert_cap_to_MI, len(df.columns==1) was False.\")\n", + " \n", + " df.index.name = 'vintage'\n", + " df = df.reset_index()\n", + "\n", + " # metadata for cap\n", + " df['acct_name'] = 'alloc_hold'\n", + " df['juris'] = ser.name.split('_')[0]\n", + " df['inst_cat'] = 'cap'\n", + " # vintage already assigned above\n", + " df['auct_type'] = 'n/a'\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['date_level'] = prmt.NaT_proxy\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + " \n", + " df = df.set_index(prmt.standard_MI_names)\n", + " \n", + " all_accts = all_accts.append(df)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer__from_alloc_hold_to_specified_acct(all_accts, to_acct_MI, vintage_start, vintage_end): \n", + " \"\"\"\n", + " Transfers allocations from allocation holding account (alloc_hold) to other accounts.\n", + " \n", + " Works for APCR (to APCR_acct), VRE (to VRE_acct), and advance (to auct_hold).\n", + " \n", + " Destination account is contained in to_acct_MI metadata.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " vintage_range = range(vintage_start, vintage_end+1)\n", + " \n", + " if prmt.run_tests == True:\n", + " test_cols_and_indexes_before_transfer(to_acct_MI) \n", + " \n", + " # change column name of to_acct_MI, but only if to_acct_MI is a df with one column and MultiIndex\n", + " # rename column of to_acct_MI, whatever it is, to 'to_acct_MI_quant'\n", + " # filter to vintages specified\n", + " to_acct_MI.columns = ['quant']\n", + " to_acct_MI_in_vintage_range = 
to_acct_MI[to_acct_MI.index.get_level_values('vintage').isin(vintage_range)]\n", + " \n", + " # ~~~~~~~~~~~~~~~`\n", + " # create df named remove, which is negative of to_acct_MI; rename column\n", + " remove = -1 * to_acct_MI_in_vintage_range.copy()\n", + "\n", + " # set indices in remove df (version of to_acct_MI) to be same as from_acct\n", + " mapping_dict = {'acct_name': 'alloc_hold', \n", + " 'inst_cat': 'cap', \n", + " 'auct_type': 'n/a', \n", + " 'newness': 'n/a', \n", + " 'status': 'n/a'}\n", + " remove = multiindex_change(remove, mapping_dict)\n", + "\n", + " # ~~~~~~~~~~~~~~~~\n", + " # if APCR, sum over all vintages, change vintage to 2200 (proxy for non-vintage)\n", + " # then groupby sum to combine all into one set\n", + " to_acct_name = to_acct_MI.index.get_level_values('acct_name').unique().tolist()\n", + " \n", + " if to_acct_name == ['APCR_acct']: \n", + " mapping_dict = {'vintage': 2200}\n", + " to_acct_MI_in_vintage_range = multiindex_change(to_acct_MI_in_vintage_range, mapping_dict)\n", + " to_acct_MI_in_vintage_range = to_acct_MI_in_vintage_range.groupby(level=prmt.standard_MI_names).sum()\n", + "\n", + " elif len(to_acct_name) != 1:\n", + " print(\"Error\" + \"! 
There was more than one to_acct_name in df that was intended to be for APCR_acct only.\")\n", + " \n", + " else:\n", + " pass\n", + " # ~~~~~~~~~~~~~~~\n", + "\n", + " # separate out any rows with negative values\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " \n", + " # combine dfs to subtract from from_acct & add to_acct_MI_1v\n", + " # (groupby sum adds the positive values in all_accts_pos and the neg values in remove)\n", + " all_accts_pos = pd.concat([all_accts_pos, remove, to_acct_MI_in_vintage_range], sort=True)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine pos & neg\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True:\n", + " name_of_allowances = to_acct_MI.index.get_level_values('inst_cat').unique().tolist()[0]\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_CA_alloc__from_alloc_hold(all_accts, to_acct_MI):\n", + " \"\"\"\n", + " Moves allocated allowances from alloc_hold into private accounts (for non-consign) or limited_use (for consign).\n", + " \n", + " The destination account is specified in metadata (index level 'acct_name') in df to_acct_MI.\n", + " \n", + " Runs at the end of each year (except for anomalous years).\n", + " \n", + " Only processes one vintage at a time; vintage is cq.date.year + 1\n", + " \n", + " \"\"\"\n", + " 
\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " if prmt.run_tests == True:\n", + " test_cols_and_indexes_before_transfer(to_acct_MI)\n", + "\n", + " # change column name of to_acct_MI, but only if to_acct_MI is a df with one column and MultiIndex\n", + " # rename column of to_acct_MI, whatever it is, to 'to_acct_MI_quant'\n", + " to_acct_MI.columns = ['quant']\n", + " \n", + " # filter to specific vintage\n", + " to_acct_MI_1v = to_acct_MI[to_acct_MI.index.get_level_values('vintage')==(cq.date.year+1)]\n", + " \n", + " # create df named remove, which is negative of to_acct_MI; rename column\n", + " remove = -1 * to_acct_MI_1v\n", + "\n", + " # set indices in remove df (version of to_acct_MI) to be same as from_acct\n", + " mapping_dict = {'acct_name': 'alloc_hold', \n", + " 'inst_cat': 'cap', \n", + " 'auct_type': 'n/a', \n", + " 'newness': 'n/a', \n", + " 'status': 'n/a'}\n", + " remove = multiindex_change(remove, mapping_dict)\n", + "\n", + " # separate out any rows with negative values\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " \n", + " # combine dfs to subtract from from_acct & add to_acct_MI_1v\n", + " # (groupby sum adds the positive values in all_accts_pos and the neg values in remove)\n", + " all_accts_pos = pd.concat([all_accts_pos, remove, to_acct_MI_1v], sort=False)\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine pos & neg\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " if prmt.run_tests == True:\n", + " inst_cat_to_acct = str(to_acct_MI.index.get_level_values('inst_cat').unique())\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, inst_cat_to_acct)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, 
all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer__from_VRE_acct_to_retirement(all_accts): \n", + " \"\"\"\n", + " Transfers allocations from allocation holding account (alloc_hold) to other accounts.\n", + " \n", + " Works for APCR (to APCR_acct), VRE (to VRE_acct), and advance (to auct_hold).\n", + " \n", + " Destination account is contained in to_acct_MI metadata.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " VRE_retired = prmt.VRE_retired\n", + " \n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " try:\n", + " VRE_retired_1q = VRE_retired.xs(cq.date, level='CIR_date', drop_level=False)\n", + "\n", + " VRE_retired_1q = VRE_retired_1q.reset_index()\n", + "\n", + " # record date of retirement as 'date_level'\n", + " VRE_retired_1q = VRE_retired_1q.rename(columns={'CIR_date': 'date_level'})\n", + "\n", + " # create MultiIndex version of VRE_retired_1q, for doing removal and addition to all_accts\n", + " df = VRE_retired_1q.copy()\n", + "\n", + " df['acct_name'] = 'VRE_acct'\n", + " df['juris'] = 'CA'\n", + " df['auct_type'] = 'n/a'\n", + " df['inst_cat'] = 'VRE_reserve'\n", + " # vintage already set (index level 'Vintage')\n", + " df['newness'] = 'n/a'\n", + " df['status'] = 'n/a'\n", + " df['unsold_di'] = prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + "\n", + " # create MultiIndex version\n", + " VRE_retired_1q_MI = df.set_index(prmt.standard_MI_names)\n", + "\n", + " to_remove = -1 * VRE_retired_1q_MI.copy()\n", + " mapping_dict = {'date_level': prmt.NaT_proxy}\n", + " to_remove = 
multiindex_change(to_remove, mapping_dict)\n", + "\n", + " to_transfer = VRE_retired_1q_MI.copy()\n", + " mapping_dict = {'acct_name': 'retirement', \n", + " 'status': 'retired'}\n", + " to_transfer = multiindex_change(to_transfer, mapping_dict)\n", + " \n", + " # concat with all_accts_pos, groupby sum, recombine with all_accts_neg\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_pos = pd.concat([all_accts_pos, to_remove, to_transfer], sort=True).groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " logging.info(f\"VRE retirement of {to_transfer['quant'].sum()} M.\")\n", + " \n", + " except:\n", + " # no VRE_retired for given date\n", + " pass\n", + " \n", + " \n", + " if prmt.run_tests == True:\n", + " name_of_allowances = 'VRE_reserve'\n", + " test_conservation_during_transfer(all_accts, all_accts_sum_init, name_of_allowances)\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def consign_groupby_sum_in_all_accts(all_accts):\n", + " \"\"\"\n", + " Sums all types of consignment allowances and assigns them inst_cat 'consign'.\n", + " \n", + " This overwrites old values of inst_cat (i.e., 'elec_alloc_IOU', etc.).\n", + " \n", + " Then this sums across the types of consignment allowances, to get a single annual value for consignment.\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of 
allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'\n", + " mask2 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2)\n", + " \n", + " consigned = all_accts.loc[mask]\n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " mapping_dict = {'inst_cat': 'consign'}\n", + " consigned = multiindex_change(consigned, mapping_dict)\n", + " consigned = consigned.groupby(level=prmt.standard_MI_names).sum() \n", + " \n", + " all_accts = consigned.append(remainder)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_consign__from_limited_use_to_auct_hold_2012Q4_and_make_available(all_accts):\n", + " \"\"\"\n", + " Specified quantities in limited_use account of a particular vintage will be moved to auct_hold.\n", + "\n", + " Only for anomalous auction 2012Q4 (CA-only), in which vintage 2013 consignment were sold at current auction.\n", + " \"\"\" \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " qauct_new_avail = prmt.qauct_new_avail\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # look up quantity consigned in 2012Q4 from historical record (consign_2012Q4)\n", + " # this anomalous fn runs when cq.date = 2012Q4\n", + " quarter_2012Q4 = quarter_period('2012Q4')\n", + " consign_2012Q4_vintage = 2013\n", + " consign_2012Q4 = qauct_new_avail.at[('auct_hold', 'CA', 
'current', 'consign', consign_2012Q4_vintage, \n", + " 'new', 'not_avail', quarter_2012Q4, prmt.NaT_proxy, prmt.NaT_proxy),\n", + " 'quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " test_if_value_is_float_or_np_float64(consign_2012Q4)\n", + " \n", + " # get allowances in limited_use, inst_cat=='consign', for specified vintage\n", + " # and calculate sum of that set\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'\n", + " mask2 = all_accts.index.get_level_values('inst_cat')=='consign'\n", + " mask3 = all_accts.index.get_level_values('vintage')==cq.date.year+1\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " consign_not_avail = all_accts.loc[mask]\n", + " quant_not_avail = consign_not_avail['quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST:\n", + " if len(consign_not_avail) != 1:\n", + " print(f\"{prmt.test_failed_msg} Expected consign_not_avail to have 1 row. Here's consign_not_avail:\")\n", + " print(consign_not_avail)\n", + " print(\"Here's all_accts.loc[mask1] (limited_use):\")\n", + " print(all_accts.loc[mask1])\n", + " # END OF TEST\n", + " \n", + " # split consign_not_avail to put only the specified quantity into auct_hold; \n", + " # rest stays in limited_use\n", + " consign_avail = consign_not_avail.copy()\n", + " \n", + " # consign_avail and consign_not_avail have same index (before consign avail index updated below)\n", + " # use this common index for setting new values for quantity in each df\n", + " # (only works because each df is a single row, as tested for above)\n", + " index_first_row = consign_avail.index[0]\n", + " \n", + " # set quantity in consign_avail, using consign_2012Q4 (input/argument for this function)\n", + " consign_avail.at[index_first_row, 'quant'] = consign_2012Q4\n", + " \n", + " # update metadata: put into auct_hold & make them available in cq.date (2012Q4)\n", + " # this fn does not make these allowances available; this will occur in separate fn, at start of 
cq.date\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 'newness': 'new', \n", + " 'date_level': cq.date, \n", + " 'status': 'available'}\n", + " consign_avail = multiindex_change(consign_avail, mapping_dict)\n", + "\n", + " # update quantity in consign_not_avail, to remove those consigned for next_q\n", + " consign_not_avail.at[index_first_row, 'quant'] = quant_not_avail - consign_2012Q4\n", + " \n", + " # get remainder of all_accts\n", + " all_accts_remainder = all_accts.loc[~mask]\n", + " \n", + " # recombine to get all_accts again\n", + " all_accts = pd.concat([consign_avail, consign_not_avail, all_accts_remainder], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_qauct_new_avail():\n", + " \"\"\"\n", + " Create new df qauct_new_avail.\n", + " \n", + " Runs within initialize_model_run (so can't use modelInitialization)\n", + " \"\"\"\n", + " \n", + " logging.info(f\"initialization: {inspect.currentframe().f_code.co_name} (start)\")\n", + "\n", + " qauct_hist = prmt.qauct_hist\n", + " \n", + " # create df: only newly available\n", + " df = qauct_hist.copy()\n", + " df = df.drop(['market', 'Available', 'Sold', 'Unsold' , 'Redesignated'], axis=1) \n", + " df = df.rename(columns={'Newly available': 'quant'})\n", + "\n", + " # add other metadata rows to make it include all prmt.standard_MI_names\n", + " df['acct_name'] = 'auct_hold'\n", + " df['newness'] = 'new'\n", + " df['status'] = 'not_avail' # will become available, but isn't yet\n", + " df['unsold_di'] = 
prmt.NaT_proxy\n", + " df['unsold_dl'] = prmt.NaT_proxy\n", + " df['units'] = 'MMTCO2e'\n", + "\n", + " df = df.set_index(prmt.standard_MI_names)\n", + " \n", + " # rename & copy (to avoid problem with slices)\n", + " qauct_new_avail = df.copy()\n", + " \n", + " prmt.qauct_new_avail = qauct_new_avail\n", + " # no return; func sets object attribute prmt.qauct_new_avail" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def initialize_CA_auctions(all_accts):\n", + " \"\"\"\n", + " Initializes first CA auction in 2012Q4 (also first for any juris in WCI market).\n", + " \n", + " This auction was anomalous, in that:\n", + " 1. There was only this single auction in 2012.\n", + " 2. The 2012Q4 current auction had available only consignment allowances (and no state-owned), \n", + " and they were a vintage ahead (2013).\n", + " 3. The 2012Q4 advance auction had available all vintage 2015 allowances at once.\n", + " \n", + " This function runs only once in each model run.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # create CA allowances v2013-v2020, put into alloc_hold\n", + " all_accts = create_annual_budgets_in_alloc_hold(all_accts, prmt.CA_cap.loc[2013:2020])\n", + " \n", + " # pre-test for conservation of allowances\n", + " # (sum_init has to be after creation of allowances in previous step)\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " # transfer APCR allowances out of alloc_hold, into APCR_acct (for vintages 2013-2020)\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.CA_APCR_MI, 2013, 2020)\n", + "\n", + " # transfer advance into auct_hold\n", + " all_accts = transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.CA_advance_MI, 2013, 2020)\n", + "\n", + " # transfer VRE allowances out of alloc_hold, into VRE_acct (for vintages 2013-2020)\n", + " all_accts = 
transfer__from_alloc_hold_to_specified_acct(all_accts, prmt.VRE_reserve_MI, 2013, 2020)\n", + "\n", + " # allocations:\n", + " # transfer alloc non-consign into ann_alloc_hold & alloc consign into limited_use \n", + " # alloc v2013 transferred in 2012Q4, before Q4 auction (same pattern as later years)\n", + " # transfer out of alloc_hold to ann_alloc_hold or limited_use\n", + " # (appropriate metadata is assigned to each set of allowances by convert_ser_to_df_MI_alloc)\n", + " \n", + " # transfer all the allocations at once (for one vintage)\n", + " # (fn only processes one vintage at a time)\n", + " all_accts = transfer_CA_alloc__from_alloc_hold(all_accts, prmt.CA_alloc_MI_all)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~\n", + " # CURRENT AUCTION (2012Q4 anomaly):\n", + " # for current auction in 2012Q4, no state-owned allowances; only consign\n", + " # consign annual amount in limited_use: get rid of distinctions between types of consign & groupby sum \n", + " all_accts = consign_groupby_sum_in_all_accts(all_accts)\n", + "\n", + " # put consignment allowances into auct_hold & make available\n", + " all_accts = transfer_consign__from_limited_use_to_auct_hold_2012Q4_and_make_available(all_accts)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~\n", + " # ADVANCE AUCTION (2012Q4 anomaly):\n", + " # remember: all vintage 2015 allowances were available at once in this auction\n", + " # get all allowances aside for advance in auct_hold that are vintage 2015 (cq.date.year+3)\n", + " adv_new_mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " adv_new_mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " adv_new_mask3 = all_accts.index.get_level_values('vintage')==(cq.date.year+3)\n", + " adv_new_mask = (adv_new_mask1) & (adv_new_mask2) & (adv_new_mask3)\n", + " adv_new = all_accts.loc[adv_new_mask]\n", + " all_accts_remainder = all_accts.loc[~adv_new_mask]\n", + " \n", + " # for this anomalous advance auction, all of these allowances were available 
in one auction\n", + " # update metadata: change 'date_level' to '2012Q4' & status' to 'available'\n", + " mapping_dict = {'date_level': quarter_period('2012Q4'),\n", + " 'status': 'available'}\n", + " adv_new = multiindex_change(adv_new, mapping_dict)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~\n", + " # recombine to create new version of all_accts\n", + " all_accts = pd.concat([adv_new, all_accts_remainder], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_CA_quarterly(all_accts):\n", + "\n", + " \"\"\"\n", + " Function that is used in the loop for each quarter, for each juris.\n", + " \n", + " Applies functions defined earlier, as well as additional rules\n", + " \n", + " Order of sales for each jurisdiction set by jurisdiction-specific functions called within process_quarter.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " latest_historical_year_cur = prmt.qauct_new_avail.index.get_level_values('date_level').year.max()\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # object \"scenario\" holds the data for a particular scenario in various attributes (scenario_CA.avail_accum, etc.)\n", + "\n", + " # START-OF-QUARTER STEPS (INCLUDING START-OF-YEAR) ***********************************************\n", + " \n", + " # process retirements for EIM and bankruptcies (CA only)\n", + " if cq.date.quarter == 4:\n", + " \n", + " # process EIM 
Outstanding in Q4, prior to auctions (required to be done before Nov 1 compliance deadline)\n", + " # retire any allowances to account for EIM Outstanding Emissions\n", + " all_accts = retire_for_EIM_outstanding(all_accts)\n", + "\n", + " # process bankruptcy retirements at same time\n", + " # retire any allowances to account for bankruptcies\n", + " all_accts = retire_for_bankruptcy(all_accts)\n", + "\n", + " else: # cq.date.quarter != 4\n", + " pass\n", + "\n", + " \n", + " # --------------------------------------------\n", + " \n", + " if cq.date.quarter == 1:\n", + " # start-of-year (Jan 1)\n", + " # on Jan 1 each year, all alloc in ann_alloc_hold are transferred to comp_acct or gen_acct \n", + " all_accts = transfer_CA_alloc__from_ann_alloc_hold_to_general(all_accts)\n", + "\n", + " if cq.date.year <= latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): \n", + " # for current auction, state-owned, vintage==cq.date.year, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, 'CA')\n", + "\n", + " # for current auction, state-owned, sum newly avail & unsold adv, upsample, assign 'date_level'\n", + " all_accts = cur_upsample_avail_state_owned_historical(all_accts, 'CA')\n", + "\n", + " elif cq.date.year > latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): \n", + " # for current auction, state-owned, vintage==cq.date.year, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, 'CA')\n", + "\n", + " # for current auction, state-owned, sum newly avail & unsold adv, upsample, assign date_level\n", + " all_accts = cur_upsample_avail_state_owned_projection(all_accts, 'CA')\n", + "\n", + " if cq.date.year >= 2013 and cq.date.year <= 2027: \n", + " # start-of-year (Jan 1???): upsample of allowances for advance auction (before Q1 auctions)\n", + " # note that the model does not attempt to simulate advance 
auctions for vintages after 2027\n", + " all_accts = upsample_advance_all_accts(all_accts)\n", + " else:\n", + " pass\n", + "\n", + "# # for Q1, take snap (~Jan 5):\n", + "# # after transferring CA alloc out of ann_alloc_hold (Jan 1)\n", + "# # and before Q1 auctions (~Feb 15) \n", + "# take_snapshot_CIR(all_accts, 'CA')\n", + " \n", + " else: # cq.date.quarter != 1\n", + "# # for Q2-Q4, take snap before auction (no special steps at the start of each quarter)\n", + "# take_snapshot_CIR(all_accts, 'CA')\n", + " pass\n", + " \n", + " # END OF START-OF-QUARTER STEPS\n", + "\n", + " # ADVANCE AUCTIONS ********************************************************\n", + " # process advance auctions through vintage 2030, which occur in years through 2027\n", + " logging.info(f\"within {inspect.currentframe().f_code.co_name}, start of advance auction\")\n", + " \n", + " if cq.date.year <= 2027:\n", + " # ADVANCE AUCTION: MAKE AVAILABLE \n", + " all_accts = CA_state_owned_make_available(all_accts, 'advance')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_CA.avail_accum = avail_accum_append(all_accts, scenario_CA.avail_accum, 'advance')\n", + "\n", + " # redesignation of unsold advance to later advance auction\n", + " all_accts = redesignate_unsold_advance_as_advance(all_accts, 'CA')\n", + "\n", + " # ADVANCE AUCTION: PROCESS SALES - CA ONLY AUCTIONS\n", + " all_accts = process_auction_adv_all_accts(all_accts, 'CA')\n", + "\n", + " else: # cq.date.year > 2027\n", + " pass\n", + " \n", + " # CURRENT AUCTION ********************************************************\n", + " logging.info(\"within process_quarter, start of current auction\")\n", + " \n", + " # CA state-owned current: make available for cq.date\n", + " all_accts = CA_state_owned_make_available(all_accts, 'current')\n", + "\n", + " # each quarter, prior to auction\n", + " # consignment: for all allowances in auct_hold with date_level == cq.date, change status to 'available'\n", + 
" all_accts = consign_make_available_incl_redes(all_accts)\n", + "\n", + " # redesignate any unsold allowances to auction (if any unsold, and if right conditions)\n", + " all_accts = redesignate_unsold_current_auct(all_accts, 'CA')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_CA.avail_accum = avail_accum_append(all_accts, scenario_CA.avail_accum, 'current')\n", + "\n", + " # process auction\n", + " all_accts = process_auction_cur_CA_all_accts(all_accts)\n", + " \n", + " \n", + " # FINISHING AFTER AUCTION: ***************************************************************\n", + " \n", + " # use new version of fn for updating reintro eligibility\n", + " update_reintro_eligibility('CA')\n", + " \n", + " if cq.date.quarter == 4: \n", + " # Q4 PROCESSING AFTER AUCTION **********************************************\n", + " # this includes transfer of consigned portion of alloc into limited_use\n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: start\")\n", + " \n", + " # end-of-year: move advance unsold to current auction\n", + " all_accts = adv_unsold_to_cur_all_accts(all_accts) \n", + " \n", + " # check for unsold from current auctions, to retire for bankruptcies\n", + " # TO DO: ADD NEW FUNCTION\n", + " \n", + " # note: the transfer allocation step below moves annual consigned allowances into limited_use\n", + " # this needs to happen before allowances for Q1 of next year can be moved from limited_use to auct_hold\n", + " if cq.date.year >= 2013: \n", + " # transfer allocations (consigned & not consigned)\n", + " \n", + " # transfer all allocations\n", + " # (fn only transfers 1 vintage at a time, vintage == cq.date.year + 1)\n", + " all_accts = transfer_CA_alloc__from_alloc_hold(all_accts, prmt.CA_alloc_MI_all)\n", + "\n", + " # disabled func below; at the moment, it is only set up for QC\n", + " # all_accts = transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, 'CA')\n", + " \n", + " # for 
consign, groupby sum to get rid of distinctions between types (IOU, POU)\n", + " all_accts = consign_groupby_sum_in_all_accts(all_accts)\n", + "\n", + " else:\n", + " # closing \"if cq.date.year >= 2013:\"\n", + " # end of transfer allocation process\n", + " pass\n", + " \n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: end\")\n", + " \n", + " else: \n", + " # closing \"if cq.date.quarter == 4:\"\n", + " pass\n", + " \n", + " # END-OF-QUARTER (EVERY QUARTER) *****************************************\n", + " logging.info(\"end-of-quarter processing (every quarter) - start\")\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " # check for unsold from current auctions, to roll over to APCR\n", + " all_accts = transfer_unsold__from_auct_hold_to_APCR(all_accts)\n", + "\n", + " # check for VRE retirement (historical data only; assumes no future VRE retirements)\n", + " all_accts = transfer__from_VRE_acct_to_retirement(all_accts)\n", + "\n", + " if cq.date < quarter_period('2030Q4'):\n", + " # transfer consignment allowances into auct_hold, for auction in following quarter\n", + " # (each quarter, after auction)\n", + " # have to do this *after* end-of-year transfer of consignment from alloc_hold to limited_use\n", + " all_accts = transfer_consign__from_limited_use_to_auct_hold(all_accts)\n", + " else:\n", + " # no projection for what happens after 2030Q4, so no transfer to occur in 2030Q4\n", + " pass\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CLEANUP OF all_accts (each quarter)\n", + " # get rid of fractional allowances, zeros, and NaN\n", + " logging.info(\"cleanup of all_accts\")\n", + " \n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + " all_accts = all_accts.dropna()\n", + " # END OF CLEANUP OF all_accts\n", + " \n", + " # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # take snap_end at end of each quarter, add to list scenario_CA.snaps_end\n", + " take_snapshot_end(all_accts, 'CA')\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " logging.info(\"end-of-quarter processing (every quarter) - end\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn) \n", + " test_conservation_against_full_budget(all_accts, 'CA', parent_fn) \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)\n", + "# end of process_CA_quarterly" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_QC_quarterly(all_accts):\n", + "\n", + " \"\"\"\n", + " Function that is used in the loop for each quarter, for each juris.\n", + " \n", + " Applies functions defined earlier, as well as additional rules\n", + " \n", + " Order of sales for each jurisdiction set by jurisdiction-specific functions called within process_quarter.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " latest_historical_year_cur = prmt.qauct_new_avail.index.get_level_values('date_level').year.max()\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # object \"scenario\" holds the data for a particular scenario in various attributes (scenario_QC.avail_accum, etc.)\n", + "\n", + " # START-OF-QUARTER STEPS (INCLUDING START-OF-YEAR) ***********************************************\n", + "\n", + " if cq.date.quarter == 1:\n", + " # start-of-year (Jan 1???): transfer of allowances for current 
auction\n", + "\n", + " if cq.date.year <= latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): for current auction, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, 'QC')\n", + "\n", + " # calculate\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, 'QC')\n", + "\n", + " all_accts = cur_upsample_avail_state_owned_historical(all_accts, 'QC')\n", + "\n", + " elif cq.date.year > latest_historical_year_cur:\n", + " # start-of-year (Jan 1???): for current auction, transfer of annual quantity of allowances\n", + " all_accts = transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, 'QC')\n", + "\n", + " # start-of-year (Jan 1???): for current auction, sum newly avail & unsold adv, upsample, assign date_level\n", + " all_accts = cur_upsample_avail_state_owned_projection(all_accts, 'QC')\n", + "\n", + "\n", + " if cq.date.year >= 2014 and cq.date.year <= 2027: \n", + " # start-of-year (Jan 1???): upsample of allowances for advance auction (before Q1 auctions)\n", + " # note that the model does not attempt to simulate advance auctions for vintages after 2027\n", + " all_accts = upsample_advance_all_accts(all_accts)\n", + " else:\n", + " pass\n", + "\n", + "# # for Q1, take snap (~Jan 5):\n", + "# # before transferring QC alloc out of ann_alloc_hold (~Jan 15)\n", + "# # and before Q1 auctions (~Feb 15)\n", + "# take_snapshot_CIR(all_accts, 'QC')\n", + "\n", + " # start-of-year (Jan 15): transfer QC allocation, initial quantity (75% of estimated ultimate allocation) \n", + " all_accts = transfer_QC_alloc_init__from_alloc_hold(all_accts)\n", + " \n", + " else: # cq.date.quarter != 1\n", + "# # for Q2-Q4, take snap before auction (no special steps at the start of each quarter)\n", + "# take_snapshot_CIR(all_accts, 'QC')\n", + " pass\n", + " \n", + " # END OF START-OF-QUARTER STEPS\n", + "\n", + " # ADVANCE AUCTIONS 
********************************************************\n", + " # process advance auctions through vintage 2030, which occur in years through 2027\n", + " logging.info(f\"within {inspect.currentframe().f_code.co_name}, start of advance auction\")\n", + " \n", + " if cq.date.year <= 2027:\n", + " # ADVANCE AUCTION: MAKE AVAILABLE \n", + " all_accts = QC_state_owned_make_available(all_accts, 'advance')\n", + "\n", + " # for QC, no redesignation of unsold advance as advance\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_QC.avail_accum = avail_accum_append(all_accts, scenario_QC.avail_accum, 'advance')\n", + "\n", + " # ADVANCE AUCTION: PROCESS SALES - QC ONLY AUCTIONS\n", + " all_accts = process_auction_adv_all_accts(all_accts, 'QC')\n", + "\n", + " else: # cq.date.year > 2027\n", + " pass\n", + " \n", + " # CURRENT AUCTION ********************************************************\n", + " logging.info(\"within process_quarter, start of current auction\")\n", + " \n", + " # QC state-owned current: make available for cq.date\n", + " all_accts = QC_state_owned_make_available(all_accts, 'current') \n", + "\n", + " all_accts = redesignate_unsold_current_auct(all_accts, 'QC')\n", + "\n", + " # record available allowances (before process auction)\n", + " scenario_QC.avail_accum = avail_accum_append(all_accts, scenario_QC.avail_accum, 'current')\n", + "\n", + " # process auction\n", + " all_accts = process_auction_cur_QC_all_accts(all_accts)\n", + " \n", + " # FINISHING AFTER AUCTION: ***************************************************************\n", + " \n", + " # use new version of fn for updating reintro eligibility\n", + " update_reintro_eligibility('QC')\n", + " \n", + " if cq.date.quarter == 4: \n", + " # Q4 PROCESSING AFTER AUCTION **********************************************\n", + " # this includes transfer of consigned portion of alloc into limited_use\n", + " logging.info(f\"for {cq.date}, for QC, Q4 processing after 
auction: start\")\n", + " \n", + " # end-of-year: move advance unsold to current auction\n", + " all_accts = adv_unsold_to_cur_all_accts(all_accts)\n", + " \n", + " logging.info(f\"for {cq.date}, Q4 processing after auction: end\")\n", + " \n", + " else: \n", + " # closing \"if cq.date.quarter == 4:\"\n", + " pass\n", + " \n", + " # END-OF-QUARTER (EVERY QUARTER) *****************************************\n", + " logging.info(\"end-of-quarter processing (every quarter) - start\")\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " # September true-ups definitely after auction\n", + " # May auctions might be before auction; not clear; will assume they occur after auctions as well\n", + " # check for QC allocation true-ups; if any, transfer from alloc_hold to gen_acct \n", + " all_accts = transfer_QC_alloc_trueups__from_alloc_hold(all_accts)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # CLEANUP OF all_accts (each quarter)\n", + " # get rid of fractional allowances, zeros, and NaN\n", + " logging.info(\"cleanup of all_accts\")\n", + " \n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + " all_accts = all_accts.dropna()\n", + " # END OF CLEANUP OF all_accts\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " # take snap_end at end of each quarter, add to list scenario_QC.snaps_end\n", + " take_snapshot_end(all_accts, 'QC')\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \n", + " logging.info(\"end-of-quarter processing (every quarter) - end\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, 
parent_fn) \n", + " test_conservation_against_full_budget(all_accts, 'QC', parent_fn) \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)\n", + "# end of process_QC_quarterly" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def take_snapshot_CIR(all_accts, juris):\n", + " \"\"\"\n", + " Take a snapshot of all_accts, which is later modified for comparison with Compliance Instrument Report (CIR).\n", + " \n", + " snap_CIR labeled with a particular quarter is actually taken early in the following quarter.\n", + " (Example: 2014Q4 snap_CIR is taken in early 2015Q1)\n", + " \n", + " This is following regulators' practice in CIR.\n", + " \n", + " So a snap_CIR taken early in cq.date is labeled as from previous_q (1 quarter before cq.date).\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " previous_q = (pd.to_datetime(f'{cq.date.year}Q{cq.date.quarter}') - DateOffset(months=3)).to_period('Q')\n", + "\n", + " snap_CIR = all_accts.copy()\n", + " snap_CIR['snap_q'] = previous_q\n", + " \n", + " if juris == 'CA':\n", + " scenario_CA.snaps_CIR += [snap_CIR]\n", + " elif juris == 'QC':\n", + " scenario_QC.snaps_CIR += [snap_CIR]\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} named {previous_q} (end)\")\n", + "\n", + " # no return; updates object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def take_snapshot_end(all_accts, juris):\n", + " \"\"\"\n", + " Take a snapshot of all_accts at the end of each quarter.\n", + " \n", + " This is to enable later start of a scenario from any given ending point.\n", + " \n", + " \"\"\" \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " snap_end = all_accts.copy()\n", + " snap_end['snap_q'] = cq.date\n", + " 
\n", + " if juris == 'CA':\n", + " scenario_CA.snaps_end += [snap_end]\n", + " elif juris == 'QC':\n", + " scenario_QC.snaps_end += [snap_end]\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " # no return; updates object attributes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def avail_accum_append(all_accts, avail_accum, auct_type_specified):\n", + " \"\"\"\n", + " Append allowances available at auction to avail_accum. Runs for advance and current auctions.\n", + " \"\"\"\n", + " \n", + " if prmt.verbose_log == True:\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # record allowances available in each auction\n", + " avail_1q = all_accts.loc[(all_accts.index.get_level_values('status')=='available') & \n", + " (all_accts.index.get_level_values('auct_type')==auct_type_specified)]\n", + "\n", + " if prmt.run_tests == True:\n", + " if len(avail_1q) > 0:\n", + " # check that all available allowances have date_level == cq.date\n", + " avail_1q_dates = avail_1q.index.get_level_values('date_level').unique().tolist()\n", + " if avail_1q_dates != [cq.date]:\n", + " print(f\"{prmt.test_failed_msg} Inside {inspect.currentframe().f_code.co_name}...\")\n", + " print(f\"... for {cq.date}, auct_type {auct_type_specified}...\")\n", + " print(f\"... available had some other date_level than cq.date. Here's avail_1q:\")\n", + " print(avail_1q)\n", + " print()\n", + " else:\n", + " pass\n", + " \n", + " elif len(avail_1q) == 0:\n", + " print(f\"{prmt.test_failed_msg} Inside {inspect.currentframe().f_code.co_name}...\")\n", + " print(f\"... 
for {cq.date}, auct_type {auct_type_specified}, available is empty df.\")\n", + " print()\n", + " \n", + " avail_accum = avail_accum.append(avail_1q)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_for_duplicated_indices(avail_accum, parent_fn)\n", + " test_for_negative_values(avail_accum, parent_fn)\n", + "\n", + " return(avail_accum)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def CA_state_owned_make_available(all_accts, auct_type):\n", + " \"\"\"\n", + " State-owned allowances in auct_hold are made available when date_level == cq.date.\n", + " \n", + " Works for current auction and advance auction; specified by argument auct_type.\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start), for auct_type {auct_type}\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get allowances in auct_hold, for current auction, for date_level == cq.date\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('auct_type')==auct_type\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('status')=='not_avail'\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " \n", + " # update status to 'available'\n", + " avail = all_accts.loc[mask]\n", + " mapping_dict = {'status': 'available'}\n", + " avail = multiindex_change(avail, mapping_dict)\n", + " \n", + " # combine avail with remainder (~mask)\n", + " all_accts = pd.concat([avail, all_accts.loc[~mask]], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, 
parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end), for auct_type {auct_type}\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def redesignate_unsold_advance_as_advance(all_accts, juris):\n", + " \"\"\"\n", + " Redesignation of unsold allowances from advance auctions, to later advance auctions.\n", + " \n", + " Only applies to CA; QC does not have similar rule.\n", + " \n", + " Based on regulations:\n", + " CA: [CA regs Oct 2017], § 95911(f)(3)\n", + " QC: [QC regs Jan 2018], Section 54. states that unsold advance will only be redesignated as current\n", + " \n", + " If advance allowances remain unsold in one auction, they can be redesignated to a later advance auction.\n", + " But this will only occur after two consecutive auctions have sold out (sold above the floor price).\n", + " If any advance allowances remain unsold at the end of a calendar year, they are retained for \n", + " redesignation to a later current auction.\n", + " \n", + " Therefore the only situation in which allowances unsold in advance auctions can be redesignated \n", + " to another advance auction is if:\n", + " 1. some allowances are unsold in advance auction in Q1\n", + " 2. 
Q2 and Q3 advance auctions sell out\n", + " And therefore the redesignations can only occur in Q4 of any given year\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # check sales pct in Q1 of cq.date.year\n", + " df = prmt.auction_sales_pcts_all.copy()\n", + " df = df.loc[(df.index.get_level_values('market').str.contains(juris)) &\n", + " (df.index.get_level_values('auct_type')=='advance')]\n", + " df.index = df.index.droplevel(['market', 'auct_type'])\n", + " \n", + " # if no sales pct for Q1 of cq.date.year (i.e., for CA in 2012, or QC in 2013), then return NaN\n", + " try:\n", + " sales_pct_adv_Q1 = df.at[f\"{cq.date.year}Q1\"]\n", + " except:\n", + " sales_pct_adv_Q1 = np.NaN\n", + " \n", + " if sales_pct_adv_Q1 < float(1) and cq.date.quarter == 4: \n", + " # auction does not sell out\n", + " # then check sales pct in Q2 & Q3:\n", + " sales_pct_adv_Q2 = df.at[f\"{cq.date.year}Q2\"]\n", + " sales_pct_adv_Q3 = df.at[f\"{cq.date.year}Q3\"]\n", + " \n", + "# # for debugging\n", + "# if use_fake_data == True:\n", + "# # for 2017Q4, override actual value for adv sales in 2017Q2; set to 100%\n", + "# # this allows redesignation of unsold from 2017Q1 in 2017Q4\n", + "# if cq.date == quarter_period('2017Q4'):\n", + "# sales_pct_adv_Q2 = float(1)\n", + "# # end debugging\n", + " \n", + " if sales_pct_adv_Q2 == float(1) and sales_pct_adv_Q3 == float(1):\n", + " # 100% of auction sold; redesignate unsold from Q1, up to limit\n", + " \n", + " # first get the quantity available before reintro\n", + " # get allowances available for cq.date auction\n", + " mask1 = all_accts.index.get_level_values('juris')==juris\n", + " mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " mask3 = all_accts.index.get_level_values('status')=='available'\n", + " mask4 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & 
(mask5)\n", + " adv_avail_1j_1q_tot = all_accts[mask]['quant'].sum() \n", + " \n", + " # max that can be redesignated is 25% of quantity already scheduled to be available\n", + " max_redes_adv = 0.25 * adv_avail_1j_1q_tot\n", + " \n", + " # get unsold from advance Q1, retained in auction holding account\n", + " unsold_adv_Q1 = all_accts.loc[(all_accts.index.get_level_values('acct_name')=='auct_hold') & \n", + " (all_accts.index.get_level_values('auct_type')=='advance') & \n", + " (all_accts.index.get_level_values('unsold_di')==f\"{cq.date.year}Q1\")]\n", + " \n", + " if len(unsold_adv_Q1) == 1:\n", + " # calculate quantity to be redesignated\n", + " redes_adv_quant = min(max_redes_adv, unsold_adv_Q1['quant'].sum())\n", + "\n", + " # create new df and specify quantity redesignated\n", + " redes_adv = unsold_adv_Q1.copy()\n", + " redes_adv['quant'] = float(0)\n", + " redes_adv.at[redes_adv.index[0], 'quant'] = redes_adv_quant\n", + " \n", + " # create to_remove df that will subtract from auct_hold\n", + " to_remove = -1 * redes_adv.copy()\n", + " \n", + " # update metadata in redes_adv\n", + " mapping_dict = {'newness': 'redes', \n", + " 'status': 'available', \n", + " 'date_level': cq.date}\n", + " redes_adv = multiindex_change(redes_adv, mapping_dict)\n", + " \n", + " # recombine dfs to create redesignated in auct_hold, and to remove quantity from unsold not_avail\n", + " all_accts = pd.concat([all_accts, redes_adv, to_remove], sort=False)\n", + " all_accts = all_accts.groupby(level=prmt.standard_MI_names).sum() \n", + " \n", + " else: \n", + " # len(unsold_adv_Q1) != 1\n", + " print(\"Error\" + \"! 
Selection of unsold_adv_Q1 did not return a single row; here's unsold_adv_Q1:\")\n", + " print(unsold_adv_Q1)\n", + " print()\n", + " else: \n", + " # end of \"if sales_pct_adv_Q2 == float(1) ...\"\n", + " pass\n", + " else: \n", + " # end of \"if sales_pct_adv_Q1 < float(1)\"\n", + " pass\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_auction_adv_all_accts(all_accts, juris):\n", + " \"\"\"\n", + " Process advance auctions for the specified jurisdiction.\n", + " \n", + " Calculates quantities sold based on percentages in auction_sales_pcts_all (historical and projected).\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get allowances available for cq.date auction\n", + " mask1 = all_accts.index.get_level_values('juris') == juris\n", + " mask2 = all_accts.index.get_level_values('auct_type') == 'advance'\n", + " mask3 = all_accts.index.get_level_values('status') == 'available'\n", + " mask4 = all_accts.index.get_level_values('date_level') == cq.date\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + "\n", + " adv_avail_1j_1q = all_accts[mask]\n", + " remainder = all_accts[~mask]\n", + " \n", + " # get sales % for advance auctions, for this juris, for cq.date\n", + " # (works for auctions whether linked or unlinked, i.e., CA-only and CA-QC)\n", + " ser = prmt.auction_sales_pcts_all.copy()\n", + " ser = ser.loc[(ser.index.get_level_values('market').str.contains(juris)) &\n", + " (ser.index.get_level_values('auct_type') =='advance')]\n", + " ser.index = ser.index.droplevel(['market', 'auct_type'])\n", + " sales_pct_adv_1j_1q = ser.at[cq.date]\n", + " \n", + " # for this juris, quantity allowances sold = available quantity * 
sales_pct_adv_1q\n", + " sold_tot_1j_1q = adv_avail_1j_1q['quant'].sum() * sales_pct_adv_1j_1q\n", + " \n", + " # remaining: un-accumulator for all CA sales; initialize here; will be updated repeatedly below\n", + " # if there was redes in previous step, this calculates the quantity using the updated version of adv_avail_1j_1q\n", + " adv_remaining_to_sell_1j_1q = adv_avail_1j_1q['quant'].sum() * sales_pct_adv_1j_1q \n", + " \n", + " # before iterating, sort index so that redes sell first\n", + " adv_avail_1j_1q = adv_avail_1j_1q.sort_index(level=['newness'], ascending=[False])\n", + " \n", + " if sales_pct_adv_1j_1q == float(1):\n", + " # then all sell:\n", + " adv_sold_1j_1q = adv_avail_1j_1q.copy()\n", + " \n", + " # and none remain in adv_avail_1j_1q\n", + " adv_avail_1j_1q['quant'] = float(0)\n", + " \n", + " elif sales_pct_adv_1j_1q < float(1):\n", + " # then assign limited sales to particular sets of allowances\n", + " # iterate through all rows for available allowances; remove those sold\n", + " \n", + " # create df to collect sold quantities; initialize with zeros\n", + " # sort_index so that earliest vintages are drawn from first\n", + " adv_avail_1j_1q = adv_avail_1j_1q.sort_index()\n", + " \n", + " adv_sold_1j_1q = adv_avail_1j_1q.copy()\n", + " adv_sold_1j_1q['quant'] = float(0)\n", + " \n", + " for row in adv_avail_1j_1q.index:\n", + " in_stock_row = adv_avail_1j_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, adv_remaining_to_sell_1j_1q)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " adv_remaining_to_sell_1j_1q = adv_remaining_to_sell_1j_1q - sold_from_row_quantity \n", + "\n", + " # update sold quantity & metadata \n", + " adv_sold_1j_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update adv_avail_1j_1q quantity (but not metadata)\n", + " adv_avail_1j_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # 
sold_from_row_quantity <= 1e-7:\n", + " pass\n", + " # end of \"for row in adv_avail_1j_1q.index:\"\n", + " else:\n", + " print(\"Error\" + \"! Should not have reached this point; may be that sales_pct_adv_1j_1q == np.NaN\")\n", + " pass\n", + " \n", + " # those still remaining in adv_avail_1j_1q are unsold; update status from 'available' to 'unsold'\n", + " adv_unsold_1j_1q = adv_avail_1j_1q\n", + " mapping_dict = {'status': 'unsold'}\n", + " adv_unsold_1j_1q = multiindex_change(adv_unsold_1j_1q, mapping_dict)\n", + " \n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " adv_sold_1j_1q = multiindex_change(adv_sold_1j_1q, mapping_dict)\n", + " \n", + " # filter out any rows with zeros or fractional allowances\n", + " adv_sold_1j_1q = adv_sold_1j_1q.loc[(adv_sold_1j_1q['quant']>1e-7) | (adv_sold_1j_1q['quant']<-1e-7)]\n", + " adv_unsold_1j_1q = adv_unsold_1j_1q.loc[(adv_unsold_1j_1q['quant']>1e-7) | (adv_unsold_1j_1q['quant']<-1e-7)]\n", + " \n", + " # recombine\n", + " all_accts = pd.concat([adv_sold_1j_1q, \n", + " adv_unsold_1j_1q, \n", + " remainder], \n", + " sort=False)\n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def consign_make_available_incl_redes(all_accts):\n", + " \"\"\"\n", + " Changes status of consignment allowances to 
'available.'\n", + "    \n", + "    Runs at the start of each quarter, before that quarter's auction. \n", + "    \n", + "    Operates on all consigned allowances in auct_hold. \n", + "    \n", + "    So if there are unsold from previous quarter, these are redesignated.\n", + "    \"\"\"\n", + "    logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + "    \n", + "    # pre-test for conservation of allowances\n", + "    all_accts_sum_init = all_accts['quant'].sum()\n", + "    \n", + "    # get consigned allowances in auct_hold\n", + "    mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + "    mask2 = all_accts.index.get_level_values('inst_cat')=='consign'\n", + "    consign_mask = (mask1) & (mask2)\n", + "    consign_avail = all_accts.loc[consign_mask]\n", + "    \n", + "    # change 'status' to 'available'\n", + "    mapping_dict = {'status': 'available'}\n", + "    consign_avail = multiindex_change(consign_avail, mapping_dict)\n", + "    \n", + "    # update metadata for redesignated allowances\n", + "    # for those that went unsold before (unsold_di != prmt.NaT_proxy):\n", + "    # change 'newness' to 'redes'\n", + "    # change 'date_level' to cq.date\n", + "    redes_mask = consign_avail.index.get_level_values('unsold_di') != prmt.NaT_proxy\n", + "    consign_redes = consign_avail.loc[redes_mask]\n", + "    mapping_dict = {'newness': 'redes', \n", + "                    'date_level': cq.date}\n", + "    consign_redes = multiindex_change(consign_redes, mapping_dict)\n", + "    \n", + "    # recombine to make new version of all_accts\n", + "    all_accts = pd.concat([consign_redes, \n", + "                           consign_avail.loc[~redes_mask],\n", + "                           all_accts.loc[~consign_mask]\n", + "                          ], sort=False)\n", + "    \n", + "    if prmt.run_tests == True:\n", + "        parent_fn = str(inspect.currentframe().f_code.co_name)\n", + "        test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + "        test_for_duplicated_indices(all_accts, parent_fn)\n", + "        test_for_negative_values(all_accts, parent_fn)\n", + "    \n", + "    
logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def redesignate_unsold_current_auct(all_accts, juris):\n", + " \"\"\"\n", + " Redesignates state-owned allowances that are eligible, changing 'status' to 'available'.\n", + " \n", + " (Consignment are redesignated by fn make)\n", + " \n", + " Note that this function only redesignates unsold from current auctions to later current auctions. \n", + " \n", + " This function doesn't redesignate unsold advance to later advance auctions.\n", + " \n", + " For applicable regs, see docstrings in functions called below: \n", + " redes_consign & reintro_update_unsold_all_juris\n", + " \n", + " \"\"\"\n", + " \n", + " if juris == 'CA':\n", + " cur_sell_out_counter = scenario_CA.cur_sell_out_counter\n", + " reintro_eligibility = scenario_CA.reintro_eligibility\n", + " elif juris == 'QC':\n", + " cur_sell_out_counter = scenario_QC.cur_sell_out_counter\n", + " reintro_eligibility = scenario_QC.reintro_eligibility\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " logging.info(f\"in {cq.date} juris {juris}, cur_sell_out_counter: {cur_sell_out_counter}\")\n", + " logging.info(f\"in {cq.date} juris {juris}, reintro_eligibility: {reintro_eligibility}\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # only do things in this function if reintro_eligibility == True\n", + " if reintro_eligibility == True:\n", + "\n", + " # ***** redesignation of advance as advance is done by fn redesignate_unsold_advance_as_advance *****\n", + " \n", + " # ***** redesignation of consignment is done by fn consign_make_available_incl_redes *****\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # redesignate unsold current state-owned (aka reintro)\n", + "\n", + " if 
prmt.run_tests == True:\n", + " # TEST: in all_accts, are available allowances only in auct_hold? & have only 'date_level'==cq.date? \n", + " # (if so, then selection below for auct_type current & status available will work properly)\n", + " available_sel = all_accts.loc[all_accts.index.get_level_values('status')=='available']\n", + " \n", + " if available_sel.empty == False:\n", + " # TEST: are avail only in auct_hold?\n", + " if available_sel.index.get_level_values('acct_name').unique().tolist() != ['auct_hold']:\n", + " print(f\"{prmt.test_failed_msg} Available allowances in account other than auct_hold. Here's available:\")\n", + " print(available_sel)\n", + " else:\n", + " pass\n", + " \n", + " # TEST: do avail have only date_level == cq.date?\n", + " if available_sel.index.get_level_values('date_level').unique().tolist() != [cq.date]:\n", + " print(f\"{prmt.test_failed_msg} Available allowances have date_level other than cq.date (%s). Here's available:\" % cq.date)\n", + " print(available_sel)\n", + " else:\n", + " pass\n", + " \n", + " else: # available_sel.empty == True\n", + " print(\"Warning\" + f\"! 
In {cq.date}, {inspect.currentframe().f_code.co_name}, available_sel is empty.\")\n", + " print(\"Because available_sel is empty, show auct_hold:\")\n", + " print(all_accts.loc[all_accts.index.get_level_values('acct_name')=='auct_hold'])\n", + " # END OF TEST\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # calculate max reintro\n", + " max_cur_reintro_1j_1q = calculate_max_cur_reintro(all_accts, juris)\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # if conditions are right, reintro any state-owned allowances\n", + " # including filter for only positive rows\n", + " mask1 = all_accts.index.get_level_values('status')=='unsold'\n", + " mask2 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask3 = all_accts['quant']>0\n", + " unsold_cur_sum = all_accts.loc[(mask1) & (mask2) & (mask3)]['quant'].sum()\n", + " \n", + " if unsold_cur_sum > 0: \n", + " all_accts = reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q)\n", + " \n", + " else: # unsold sum was not > 0, so no unsold to redesignate\n", + " pass\n", + " else: # reintro_eligibility == False; nothing to do\n", + " pass\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def calculate_max_cur_reintro(all_accts, juris):\n", + " \"\"\"\n", + " Calculate the maximum quantity of state-owned allowances that went unsold at current auction that can be reintroduced.\n", + "\n", + " Based on regulations:\n", + " CA: [CA regs Oct 2017], § 95911(f)(3)(C)\n", + " QC: [QC regs Sep 2017], Section 54\n", + " ON: [ON 
regs Jan 2018], Section 58(4)3\n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # select allowances available (state & consign), before redesignation of unsold current state-owned (aka reintro)\n", + " cur_avail_mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " cur_avail_mask2 = all_accts.index.get_level_values('status')=='available'\n", + " cur_avail_mask3 = all_accts.index.get_level_values('juris')==juris\n", + " cur_avail_mask = (cur_avail_mask1) & (cur_avail_mask2) & (cur_avail_mask3)\n", + " \n", + " cur_avail_1j_1q = all_accts.loc[cur_avail_mask]\n", + " cur_avail_1j_1q_tot = cur_avail_1j_1q['quant'].sum()\n", + "\n", + " # calculate maximum reintro quantity for specified juris\n", + " max_cur_reintro_1j_1q = cur_avail_1j_1q_tot * 0.25\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(max_cur_reintro_1j_1q)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def reintro_update_unsold_1j(all_accts, juris, max_cur_reintro_1j_1q):\n", + " \"\"\"\n", + " Takes unsold allowances, reintro some based on rules.\n", + " \n", + " This function is called only when reintro_eligibility == True.\n", + " \n", + " Rules on redesignation of unsold state-owned allowances (aka reintroduction):\n", + " CA: [CA regs Oct 2017], § 95911(f)(3)(B) & (C)\n", + " QC: [QC regs Sep 2017], Section 54 (paragraph 1)\n", + " ON: [ON regs Jan 2018], Section 58(4)1\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " mask1 = all_accts.index.get_level_values('status')=='unsold'\n", + " mask2 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask3 = 
all_accts.index.get_level_values('auct_type')=='current'\n", + " mask4 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask5 = all_accts['quant']>0\n", + " \n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " \n", + " # QC anomalies: add additional masks:\n", + " if cq.date == quarter_period('2015Q2'):\n", + " # only reintro vintage 2013\n", + " mask6 = all_accts.index.get_level_values('vintage')==2013\n", + " mask = mask & (mask6)\n", + " \n", + " reintro_eligible_1j = all_accts.loc[mask]\n", + " \n", + " elif cq.date == quarter_period('2015Q3') or cq.date == quarter_period('2015Q4'):\n", + " # block reintro\n", + " reintro_eligible_1j = prmt.standard_MI_empty.copy()\n", + " \n", + " else:\n", + " reintro_eligible_1j = all_accts.loc[mask]\n", + " \n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " if reintro_eligible_1j['quant'].sum() > 0:\n", + "\n", + " # accumulator: amount reintro in present auction\n", + " reintro_1q_quant = 0 # initialize\n", + "\n", + " # un-accumulator: amount remaining to be introduced\n", + " max_cur_reintro_1j_1q_remaining = max_cur_reintro_1j_1q\n", + " \n", + " # initialize df to collect all reintro\n", + " reintro_1j_1q = prmt.standard_MI_empty.copy()\n", + " \n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " reintro_eligible_1j = reintro_eligible_1j.sort_index()\n", + " \n", + " for row in reintro_eligible_1j.index:\n", + " if max_cur_reintro_1j_1q_remaining == 0:\n", + " break\n", + " \n", + " else:\n", + " reintro_one_batch_quantity = min(max_cur_reintro_1j_1q_remaining,\n", + " reintro_eligible_1j.at[row, 'quant'])\n", + "\n", + " # update accumulator for amount reintro so far in present quarter (may be more than one batch)\n", + " reintro_1q_quant += reintro_one_batch_quantity\n", + " \n", + " # update un-accumulator for max_cur_reintro_1j_1q_remaining (may be more than one batch)\n", + " max_cur_reintro_1j_1q_remaining += -1*reintro_one_batch_quantity\n", 
+ "\n", + " # copy reintro_eligible_1j before update; use to create reintro_1j_1q \n", + " # create copy of reintro_eligible_1j & clear quantities (set equal to zero float)\n", + " reintro_one_batch = reintro_eligible_1j.copy()\n", + " reintro_one_batch.index.name = 'reintro_one_batch'\n", + " reintro_one_batch.name = 'reintro_one_batch'\n", + " reintro_one_batch['quant'] = float(0)\n", + " \n", + " # set new value for 'quant'; delete rows with zero quantity\n", + " reintro_one_batch.at[row, 'quant'] = reintro_one_batch_quantity\n", + " \n", + " # put reintro for this row into df for collecting all reintro for this juris\n", + " reintro_1j_1q = pd.concat([reintro_1j_1q, reintro_one_batch])\n", + " \n", + " # update reintro_eligible_1j to remove reintro_one_batch_quantity\n", + " reintro_eligible_1j.at[row, 'quant'] = reintro_eligible_1j.at[row, 'quant'] - reintro_one_batch_quantity \n", + "\n", + " # filter out rows with fractional allowances, zero, NaN\n", + " reintro_1j_1q = reintro_1j_1q.loc[(reintro_1j_1q['quant']>1e-7) | (reintro_1j_1q['quant']<-1e-7)].dropna()\n", + " reintro_1j_1q = reintro_1j_1q.dropna()\n", + " \n", + " # log the quantity reintroduced\n", + " logging.info(f\"in {cq.date} for juris {juris}, quantity reintro: {reintro_1j_1q['quant'].sum()}\")\n", + " \n", + " # don't need to update acct_name; should still be auct_hold\n", + " mapping_dict = {'newness': 'reintro', \n", + " 'status': 'available', \n", + " 'date_level': cq.date}\n", + " reintro_1j_1q = multiindex_change(reintro_1j_1q, mapping_dict)\n", + "\n", + " # filter out zero rows\n", + " reintro_eligible_1j = reintro_eligible_1j.loc[reintro_eligible_1j['quant']>0]\n", + " reintro_1j_1q = reintro_1j_1q.loc[reintro_1j_1q['quant']>0]\n", + " \n", + " # concat to recreate all_accts\n", + " # (alternative: create df of reintro to remove, then just concat all_accts_pos, to add, to remove)\n", + " all_accts = pd.concat([reintro_1j_1q, reintro_eligible_1j, remainder], sort=True)\n", + " 
all_accts = all_accts.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " else: # if reintro_eligible_1j['quant'].sum() is not > 0\n", + " pass\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def process_auction_cur_CA_all_accts(all_accts):\n", + " \"\"\"\n", + " Processes current auction for CA, applying the specified order of sales (when auctions don't sell out).\n", + " \n", + " Order of sales based on regs:\n", + " CA: [CA regs Oct 2017], § 95911(f)(1)\n", + "\n", + " CA: for consignment, how to split sales between entities:\n", + " [CA regs Oct 2017], § 95911(f)(2) \n", + " \n", + " Notes: Once it is confirmed to be working properly, this could be simplified by:\n", + " 1. not re-doing filtering from scratch each batch of allowances\n", + " 2. 
finishing new fn avail_to_sold_all_accts, to avoid repetitive code\n", + " \n", + " \"\"\"\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: check that all available allowances are in auct_hold\n", + " avail_for_test = all_accts.loc[all_accts.index.get_level_values('status')=='available']\n", + " avail_for_test_accts = avail_for_test.index.get_level_values('acct_name').unique().tolist()\n", + " if avail_for_test.empty == False:\n", + " if avail_for_test_accts != ['auct_hold']:\n", + " print(f\"{prmt.test_failed_msg} Some available allowances were in an account other than auct_hold. Here's available:\")\n", + " print(avail_for_test)\n", + " else: # avail_for_test_accts == ['auct_hold']\n", + " pass\n", + " else: # avail_for_test.empty == True\n", + " print(\"Warning\" + \"! avail_for_test is empty.\")\n", + " # END OF TEST\n", + " \n", + " # get sales % for current auctions, for this juris, for cq.date\n", + " # (works for auctions whether linked or unlinked, i.e., CA-only and CA-QC)\n", + " df = prmt.auction_sales_pcts_all.copy()\n", + " df = df.loc[(df.index.get_level_values('market').str.contains('CA')) &\n", + " (df.index.get_level_values('auct_type')=='current')]\n", + " df.index = df.index.droplevel(['market', 'auct_type'])\n", + " sales_fract_cur_1j_1q = df.at[cq.date]\n", + " \n", + " # get current available allowances\n", + " # (it should be that all available allowances are in auct_hold)\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='CA'\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & 
(mask5)\n", + " cur_avail_CA_1q = all_accts.loc[mask]\n", + " \n", + " not_cur_avail_CA_1q = all_accts.loc[~mask]\n", + " \n", + " if sales_fract_cur_1j_1q == 1.0:\n", + " # all available allowances are sold and transferred into gen_acct\n", + " cur_sold_CA_1q = cur_avail_CA_1q\n", + " mapping_dict = {'status': 'sold', 'acct_name': 'gen_acct'}\n", + " cur_sold_CA_1q = multiindex_change(cur_sold_CA_1q, mapping_dict)\n", + " \n", + " # recombine\n", + " all_accts = pd.concat([cur_sold_CA_1q, not_cur_avail_CA_1q], sort=False)\n", + " \n", + " else: # sales_fract_cur_1j_1q != 1.0:\n", + " # calculate quantity of CA allowances sold (and test that variable is a float)\n", + " cur_sold_1q_tot_CA = cur_avail_CA_1q['quant'].sum() * sales_fract_cur_1j_1q\n", + " \n", + " if prmt.run_tests == True:\n", + " test_if_value_is_float_or_np_float64(cur_sold_1q_tot_CA)\n", + "\n", + " # remaining: un-accumulator for all CA sales; initialize here; will be updated repeatedly below\n", + " cur_remaining_to_sell_1q_CA = cur_sold_1q_tot_CA.copy()\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # sales priority: consignment are first\n", + " # use if statement (or alternative) for all types of consignment, \n", + " # because it is possible (although unlikely) that, in a particular quarter, entities may opt to consign 0\n", + " # even if they must consign more than 0 for the whole year\n", + " # earlier in model, amounts per quarter are calculated as one-quarter of annual total, but that may change in future\n", + "\n", + " # select consignment allowances (from allowances available in cq.date current auction)\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='CA'\n", + " mask5 = all_accts.index.get_level_values('inst_cat')=='consign'\n", + " 
mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " consign_avail_1q = all_accts.loc[mask]\n", + " not_consign_avail_1q = all_accts.loc[~mask]\n", + " \n", + " # iterate through all rows for available allowances; remove those sold\n", + " # (code adapted from avail_to_sold)\n", + "\n", + " # start by creating df from avail, with values zeroed out\n", + " consign_sold_1q = consign_avail_1q.copy()\n", + " consign_sold_1q['quant'] = float(0)\n", + "\n", + " # in regulations, for consignment, no sales priority of redesignated vs. newly available\n", + " # however, for simplicity and to match state-owned behavior, sort df so that redes will sell before new\n", + " # first sort by vintage, ascending=True (redes will be same vintage or earlier than newly available)\n", + " # then sort by newness, ascending=False (so that 'redes' will occur before 'new')\n", + " consign_avail_1q = consign_avail_1q.sort_index(level=['vintage', 'newness'], ascending=[True, False])\n", + "\n", + " for row in consign_avail_1q.index:\n", + " in_stock_row = consign_avail_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " cur_remaining_to_sell_1q_CA = cur_remaining_to_sell_1q_CA - sold_from_row_quantity\n", + "\n", + " # update sold quantity\n", + " consign_sold_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update avail quantity\n", + " consign_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # sold_from_row_quantity <= 1e-7:\n", + " pass\n", + "\n", + " # update metadata for sold\n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " consign_sold_1q = 
multiindex_change(consign_sold_1q, mapping_dict)\n", + "\n", + " # for unsold, metadata is updated for all allowance types at once, at end of this function\n", + " # unsold is what's left in avail df\n", + " consign_unsold_1q = consign_avail_1q\n", + "\n", + " # recombine to create new version of all_accts\n", + " all_accts = pd.concat([consign_sold_1q,\n", + " consign_unsold_1q,\n", + " not_consign_avail_1q], \n", + " sort=False)\n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + "\n", + " if prmt.run_tests == True:\n", + " # TEST: conservation of allowances\n", + " all_accts_after_consign_sales = all_accts['quant'].sum()\n", + " diff = all_accts_after_consign_sales - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after consignment sales.\")\n", + " print(\"diff = all_accts_after_consign_sales - all_accts_sum_init:\")\n", + " print(diff)\n", + " print(\"all_accts_sum_init: %s\" % all_accts_sum_init)\n", + " print(\"consign_sold_1q sum: %s\" % consign_sold_1q['quant'].sum())\n", + " print(\"consign_unsold_1q sum: %s\" % consign_unsold_1q['quant'].sum())\n", + " print(\"not_consign_avail_1q sum: %s\" % not_consign_avail_1q['quant'].sum())\n", + " # END OF TEST\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # sales priority: after consignment, reintro are next\n", + "\n", + " # extract reintro allowances from all_accts\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='CA'\n", + " mask5 = all_accts.index.get_level_values('newness')=='reintro'\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", 
+ " reintro_avail_1q = all_accts.loc[mask]\n", + " not_reintro_avail_1q = all_accts.loc[~mask]\n", + "\n", + " # iterate through all rows for available allowances; remove those sold\n", + " # (code adapted from avail_to_sold)\n", + " \n", + " # start by creating df from avail, with values zeroed out\n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " reintro_avail_1q = reintro_avail_1q.sort_index()\n", + " \n", + " reintro_sold_1q = reintro_avail_1q.copy()\n", + " reintro_sold_1q['quant'] = float(0)\n", + "\n", + " for row in reintro_avail_1q.index:\n", + " in_stock_row = reintro_avail_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " cur_remaining_to_sell_1q_CA = cur_remaining_to_sell_1q_CA - sold_from_row_quantity\n", + "\n", + " # update sold quantity & metadata\n", + " reintro_sold_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update reintro_avail_1q quantity (but not metadata)\n", + " reintro_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # sold_from_row_quantity <= 1e-7:\n", + " pass\n", + "\n", + "\n", + " # using all_accts:\n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " reintro_sold_1q = multiindex_change(reintro_sold_1q, mapping_dict)\n", + "\n", + " # for unsold, metadata is updated for all allowance types at once, at end of this function\n", + " # unsold is what's left in avail df\n", + " reintro_unsold_1q = reintro_avail_1q\n", + "\n", + " # recombine\n", + " all_accts = pd.concat([reintro_sold_1q,\n", + " reintro_unsold_1q,\n", + " not_reintro_avail_1q], \n", + " sort=False)\n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) 
| (all_accts['quant']<-1e-7)]\n", + "\n", + " if prmt.run_tests == True:\n", + " # TEST: conservation of allowances\n", + " all_accts_after_reintro_sales = all_accts['quant'].sum()\n", + " diff = all_accts_after_reintro_sales - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after reintro sales.\")\n", + " # END OF TEST\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # sales priority: state-owned allowances available for first time as current (including fka adv, if there are any)\n", + "\n", + " # extract state allowances new to current auctions (from all_accts)\n", + " mask1 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask2 = all_accts.index.get_level_values('status')=='available'\n", + " mask3 = all_accts.index.get_level_values('date_level')==cq.date\n", + " mask4 = all_accts.index.get_level_values('juris')=='CA'\n", + " mask5 = all_accts.index.get_level_values('newness')=='new'\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " new_avail_1q = all_accts.loc[mask]\n", + " not_new_avail_1q = all_accts.loc[~mask]\n", + "\n", + " # iterate through all rows for available allowances; remove those sold\n", + " # (code adapted from avail_to_sold)\n", + " \n", + " # start by creating df from avail, with values zeroed out\n", + " # sort_index to ensure that earliest vintages are drawn from first\n", + " new_avail_1q = new_avail_1q.sort_index()\n", + " \n", + " new_sold_1q = new_avail_1q.copy()\n", + " new_sold_1q['quant'] = float(0)\n", + "\n", + " for row in new_avail_1q.index:\n", + " in_stock_row = new_avail_1q.at[row, 'quant']\n", + "\n", + " sold_from_row_quantity = min(in_stock_row, cur_remaining_to_sell_1q_CA)\n", + "\n", + " if sold_from_row_quantity > 1e-7:\n", + " # update un-accumulator for jurisdiction\n", + " cur_remaining_to_sell_1q_CA = 
cur_remaining_to_sell_1q_CA - sold_from_row_quantity\n", + "\n", + " # update sold quantity & metadata\n", + " new_sold_1q.at[row, 'quant'] = sold_from_row_quantity\n", + "\n", + " # update new_avail_1q quantity (but not metadata)\n", + " new_avail_1q.at[row, 'quant'] = in_stock_row - sold_from_row_quantity\n", + "\n", + " else: # sold_from_row_quantity <= 1e-7:\n", + " pass\n", + "\n", + " # using all_accts:\n", + " # for those sold, update status from 'available' to 'sold' & update acct_name from 'auct_hold' to 'gen_acct'\n", + " mapping_dict = {'status': 'sold', \n", + " 'acct_name': 'gen_acct'}\n", + " new_sold_1q = multiindex_change(new_sold_1q, mapping_dict)\n", + "\n", + " # for unsold, metadata is updated for all allowance types at once, at end of this function\n", + " # unsold is what's left in avail df\n", + " new_unsold_1q = new_avail_1q\n", + "\n", + " # recombine & groupby sum\n", + " all_accts = pd.concat([new_sold_1q,\n", + " new_unsold_1q,\n", + " not_new_avail_1q], \n", + " sort=True).sort_index() \n", + " # all_accts = all_accts.groupby(prmt.standard_MI_names).sum()\n", + "\n", + " # clean-up\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | (all_accts['quant']<-1e-7)]\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: conservation of allowances\n", + " all_accts_after_new_sales = all_accts['quant'].sum()\n", + " diff = all_accts_after_new_sales - all_accts_sum_init\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in fn process_auction_cur_CA_all_accts, after new sales.\")\n", + " # END OF TEST\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # update status for all unsold\n", + " all_accts = unsold_update_status(all_accts)\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + "\n", + " # filter out rows with fractional allowances or zero\n", + " all_accts = all_accts.loc[(all_accts['quant']>1e-7) | 
(all_accts['quant']<-1e-7)].dropna()\n", + "\n", + " # end of if-else statement that began \"if sales_fract_cur_1j_1q == 1.0)\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)\n", + "# end of process_auction_cur_CA_all_accts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def update_reintro_eligibility(juris):\n", + " \"\"\"\n", + " Updates reintro_eligibility (attribute of object) based on results of cq.date auction.\n", + " \n", + " Selects auction sales percentages for specified jurisdiction (to handle separate markets).\n", + " \n", + " For citation of regs about reintro for each jurisdiction, see docstring for reintro_update_unsold_1j.\n", + " \"\"\"\n", + " \n", + " # set local variables cur_sell_out_counter & reintro_eligibility\n", + " # (corresponding attributes of objects scenario_CA & scenario_QC are set at end of func)\n", + " if juris == 'CA':\n", + " cur_sell_out_counter = scenario_CA.cur_sell_out_counter\n", + " reintro_eligibility = scenario_CA.reintro_eligibility\n", + " elif juris == 'QC':\n", + " cur_sell_out_counter = scenario_QC.cur_sell_out_counter\n", + " reintro_eligibility = scenario_QC.reintro_eligibility\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " logging.info(f\"in {cq.date} for {juris}, cur_sell_out_counter is {cur_sell_out_counter} before update\")\n", + " logging.info(f\"in {cq.date} for {juris}, reintro_eligibility is {reintro_eligibility} before update\")\n", + " \n", + " # get sales % for advance auctions, for this juris, for cq.date\n", + " # (works for auctions 
both linked and unlinked, i.e., inputting juris=='CA' works for CA-only and CA-QC auctions)\n", + " df = prmt.auction_sales_pcts_all.copy()\n", + " df = df.loc[(df.index.get_level_values('market').str.contains(juris)) &\n", + " (df.index.get_level_values('auct_type')=='current')]\n", + " df.index = df.index.droplevel(['market', 'auct_type'])\n", + " sales_pct_cur_1j_1q = df.at[cq.date]\n", + " \n", + " # if sales_pct_cur_1j_1q is 1, returns True; else False\n", + " cur_sell_out = sales_pct_cur_1j_1q == float(1)\n", + "\n", + " if cur_sell_out == True:\n", + " cur_sell_out_counter = cur_sell_out_counter + 1\n", + " elif cur_sell_out == False:\n", + " # reset value\n", + " cur_sell_out_counter = 0\n", + " else:\n", + " print(\"Error\" + \"!: cur_sell_out is neither True nor False.\")\n", + "\n", + " if cur_sell_out_counter < 2:\n", + " reintro_eligibility = False\n", + " elif cur_sell_out_counter >= 2:\n", + " reintro_eligibility = True\n", + " else:\n", + " print(\"Error\" + \"!: cur_sell_out_counter has a problem\")\n", + "\n", + " logging.info(f\"in {cq.date} for {juris}, cur_sell_out_counter is {cur_sell_out_counter} after update\")\n", + " logging.info(f\"in {cq.date} for {juris}, reintro_eligibility is {reintro_eligibility} after update\")\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " # update attributes of objects scenario_CA & scenario_QC\n", + " if juris == 'CA':\n", + " scenario_CA.cur_sell_out_counter = cur_sell_out_counter\n", + " scenario_CA.reintro_eligibility = reintro_eligibility\n", + " elif juris == 'QC':\n", + " scenario_QC.cur_sell_out_counter = cur_sell_out_counter\n", + " scenario_QC.reintro_eligibility = reintro_eligibility\n", + "\n", + " # using object attributes, no return from this func" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_consign__from_limited_use_to_auct_hold(all_accts):\n", + " \"\"\"\n", + " Specified 
quantities remaining in limited_use account of a particular vintage will be moved to auct_hold.\n", + "\n", + " Allowances consigned must be transferred into auct_hold 75 days before the auction in which they will be available.\n", + " CA regs: § 95910(d)(4).\n", + " \n", + " Runs at the end of each quarter, after auction processed for that quarter.\n", + " \n", + " So, i.e., for Q3 auction ~Aug 15, transfer would occur ~June 1 (in Q2), after Q2 auction (~May 15).\n", + " \n", + " These allowances will become available in the following auction (one quarter after cq.date).\n", + " \n", + " Since this is for consignment, which are only in CA, it doesn't apply to QC or ON.\n", + " \"\"\" \n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # create next_q after cq.date (formatted as quarter)\n", + " next_q = (pd.to_datetime(f'{cq.date.year}Q{cq.date.quarter}') + DateOffset(months=3)).to_period('Q')\n", + " \n", + " # get quantity consigned in next_q (consign_next_q_quant)\n", + " # vintage of these allowances will always be next_q.year \n", + " # (true even for anomalous CA auction in 2012Q4; other jurisdictions don't have consignment)\n", + " # look up quantity consigned in next_q from historical record\n", + " consign_next_q_quant = prmt.consign_hist_proj.at[\n", + " ('auct_hold', 'CA', 'current', 'consign', next_q.year, 'new', 'not_avail', next_q, \n", + " prmt.NaT_proxy, prmt.NaT_proxy, 'MMTCO2e'), \n", + " 'quant']\n", + "\n", + " if prmt.run_tests == True:\n", + " test_if_value_is_float_or_np_float64(consign_next_q_quant)\n", + " \n", + " # get allowances in limited_use, inst_cat=='consign', for specified vintage\n", + " # and calculate sum of that set\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='limited_use'\n", + " mask2 = all_accts.index.get_level_values('inst_cat')=='consign'\n", 
+ " mask3 = all_accts.index.get_level_values('vintage')==next_q.year\n", + " mask4 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4)\n", + " consign_not_avail = all_accts.loc[mask]\n", + " quant_not_avail = consign_not_avail['quant'].sum()\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: check that consign_not_avail is only 1 row\n", + " if len(consign_not_avail) != 1:\n", + " print(f\"{prmt.test_failed_msg} Expected consign_not_avail to have 1 row. Here's consign_not_avail:\")\n", + " print(consign_not_avail)\n", + " print(\"Here's all_accts.loc[mask1] (limited_use):\")\n", + " print(all_accts.loc[mask1])\n", + " print(\"Here's all_accts:\")\n", + " print(all_accts)\n", + " # END OF TEST\n", + " \n", + " # split consign_not_avail to put only the specified quantity into auct_hold; \n", + " # rest stays in limited_use\n", + " consign_avail = consign_not_avail.copy()\n", + " \n", + " # consign_avail and consign_not_avail have same index (before consign avail index updated below)\n", + " # use this common index for setting new values for quantity in each df\n", + " # (only works because each df is a single row, as tested for above)\n", + " index_first_row = consign_avail.index[0]\n", + " \n", + " # set quantity in consign_avail, using consign_next_q_quant\n", + " consign_avail.at[index_first_row, 'quant'] = consign_next_q_quant\n", + " \n", + " # update metadata: put into auct_hold\n", + " # this fn does not make these allowances available; this will occur in separate fn, at start of cq.date\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 'newness': 'new', \n", + " 'status': 'not_avail', \n", + " 'date_level': next_q}\n", + " consign_avail = multiindex_change(consign_avail, mapping_dict)\n", + "\n", + " # update quantity in consign_not_avail, to remove those consigned for next_q\n", + " consign_not_avail.at[index_first_row, 'quant'] = quant_not_avail - consign_next_q_quant\n", + " \n", + " # get remainder of 
all_accts\n", + " all_accts_remainder = all_accts.loc[~mask]\n", + " \n", + " # recombine to get all_accts again\n", + " all_accts = pd.concat([consign_avail, \n", + " consign_not_avail, \n", + " all_accts_remainder], \n", + " sort=False)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_CA_alloc__from_ann_alloc_hold_to_general(all_accts):\n", + " \"\"\"\n", + " Transfers CA allocations out of annual allocation holding account, into general account.\n", + " \n", + " Transfers occur on Jan 1 of each year, per 95831(a)(6)(D) to (I).\n", + " \n", + " Note that Compliance Instrument Reports for Q4 of a given year are generated in early Jan of following year.\n", + " So CIRs for Q4 include these Jan 1 tranfers of CA allocations out of government accounts and into private accounts.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get all allowances in acct_name == 'ann_alloc_hold' & juris == 'CA'\n", + " mask1 = all_accts.index.get_level_values('acct_name') == 'ann_alloc_hold'\n", + " mask2 = all_accts.index.get_level_values('juris') == 'CA'\n", + " mask = (mask1) & (mask2)\n", + " to_transfer = all_accts.loc[mask]\n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " # change metadata for ann_alloc_hold allowances, to move to compliance account\n", + " mapping_dict = {'acct_name': 'gen_acct'}\n", + " 
to_transfer = multiindex_change(to_transfer, mapping_dict)\n", + " \n", + " # recombine dfs\n", + " all_accts = pd.concat([to_transfer, remainder])\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_cur__from_alloc_hold_to_auct_hold_historical(all_accts, juris):\n", + " \"\"\"\n", + " Transfer the known historical quantities of allowances available in current auctions.\n", + " \n", + " Processes all allowances for a given year (with date_level year == cq.date year).\n", + " \n", + " Occurs at the start of each year (Jan 1).\n", + " \n", + " Transfers specified allowances from alloc_hold to auct_hold.\n", + " \n", + " Operates only for newly available allowances, as specified manually in input file sheet 'qauct 2012-2017'.\n", + " \n", + " Operates only for state-owned allowances.\n", + " \n", + " Note: There is a separate fn for making the allowances in auct_hold available.\n", + "\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n", + " # get the historical data for cq.date.year\n", + " df = prmt.qauct_new_avail.copy()\n", + " mask1 = df.index.get_level_values('inst_cat')==juris\n", + " mask2 = df.index.get_level_values('auct_type')=='current'\n", + " mask3 = df.index.get_level_values('date_level').year==cq.date.year\n", + " mask4 = 
df.index.get_level_values('vintage')==cq.date.year\n", + " avail_1v = df.loc[(mask1) & (mask2) & (mask3) & (mask4)]\n", + " \n", + " # if partial year of historical data, fill in remaining auctions\n", + " # in avail_1v, get the max date_level, and then the quarter of that max date\n", + " max_quarter = avail_1v.index.get_level_values('date_level').max().quarter\n", + "\n", + " for quarter in range(max_quarter+1, 4+1): \n", + " # only enters loop if max_quarter < 4, which means partial year of data; \n", + " # make projection for remaining quarters,\n", + " # assume that future auctions will be same on average as auctions in year-to-date\n", + " \n", + " # quantity of allowances newly available so far in cq.date.year\n", + " avail_1v_tot_so_far = avail_1v['quant'].sum()\n", + " \n", + " # average quantity per auction\n", + " avail_1v_avg = avail_1v_tot_so_far / max_quarter\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # fill in remaining auctions\n", + " \n", + " # create new df and template row for appending to it\n", + " avail_1v_plus = avail_1v.copy()\n", + " template_row = avail_1v.loc[avail_1v_plus.index[-1:]]\n", + " \n", + " # determine new quarterly date for auction, set in level 'date_level'\n", + " future_date = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " mapping_dict = {'date_level': future_date}\n", + " template_row = multiindex_change(template_row, mapping_dict)\n", + "\n", + " # set new quantity\n", + " template_row.at[template_row.index, 'quant'] = avail_1v_avg\n", + "\n", + " # append to historical plus\n", + " avail_1v_plus = avail_1v_plus.append(template_row)\n", + " \n", + " # set the extended historical record to have same name as original df\n", + " avail_1v = avail_1v_plus\n", + "\n", + " # end of \"for quarter in range(max_quarter+1, 4+1):\"\n", + " \n", + " # Now there is a full year of data (either historical, or historical + projection for remaining quarters)\n", + " \n", + " # calculate total to be available, of current 
vintage (vintage == cq.date.year)\n", + " avail_1v_tot = avail_1v['quant'].sum()\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # already in auct_hold (state-owned), current vintage\n", + " # (unsold advance also added to auct_hold)\n", + " # may be multiple rows\n", + " mask1 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask2 = all_accts.index.get_level_values('auct_type')=='current' # probably redundant\n", + " mask3 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask4 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + "\n", + " # calculate quantity of current vintage already in auct_hold\n", + " auct_hold_1v_tot = all_accts.loc[mask]['quant'].sum()\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # calculate quantity to transfer: only those needed to get avail_1v_tot into auct_hold\n", + " to_transfer_tot = avail_1v_tot - auct_hold_1v_tot\n", + "\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " # from alloc_hold, get cap allowances of specified juris & vintage, to be drawn from\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='alloc_hold'\n", + " mask2 = all_accts.index.get_level_values('juris')==juris\n", + " mask3 = all_accts.index.get_level_values('inst_cat')=='cap'\n", + " mask4 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + "\n", + " # split all_accts into two parts (for alloc_hold & not auct_hold)\n", + " alloc_hold_1v = all_accts.loc[mask]\n", + " not_alloc_hold = all_accts.loc[~mask]\n", + "\n", + " alloc_hold_1v_tot = alloc_hold_1v['quant'].sum()\n", + "\n", + " diff = alloc_hold_1v_tot - to_transfer_tot\n", + "\n", + " if diff < 1e-7:\n", + " # there are just enough allowances in alloc_hold *or* there is a shortfall in alloc_hold\n", + "\n", + " # then 
transfer everything in alloc_hold to auct_hold\n", + " # (will wind up with negative values in auct_hold after cur_upsample_avail_state_owned_historical)\n", + " from_alloc_hold = alloc_hold_1v.copy()\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 'auct_type': 'current', \n", + " 'inst_cat': juris, \n", + " 'newness': 'new', \n", + " 'status': 'not_avail'}\n", + " from_alloc_hold = multiindex_change(from_alloc_hold, mapping_dict)\n", + "\n", + " # recreate all_accts\n", + " # note: removed deficit that had been calculated before\n", + " all_accts = pd.concat([not_alloc_hold, from_alloc_hold], sort=True)\n", + "\n", + " # groupby sum, splitting all_accts into pos & neg\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0]\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0]\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " elif diff > 1e-7:\n", + " # excess allowances in alloc_hold, so leave remainder in alloc_hold\n", + " # (reminder: diff = alloc_hold_1v_tot - to_transfer_tot)\n", + "\n", + " # split alloc_hold allowances into part needed and part left behind\n", + " from_alloc_hold = alloc_hold_1v.copy()\n", + " from_alloc_hold.at[from_alloc_hold.index, 'quant'] = to_transfer_tot\n", + "\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 'auct_type': 'current', \n", + " 'inst_cat': juris, \n", + " 'newness': 'new', \n", + " 'status': 'not_avail'}\n", + " from_alloc_hold = multiindex_change(from_alloc_hold, mapping_dict)\n", + " \n", + " remainder_alloc_hold = alloc_hold_1v.copy()\n", + " # reminder: diff = alloc_hold_1v_tot - to_transfer_tot\n", + " remainder_alloc_hold.at[remainder_alloc_hold.index, 'quant'] = diff\n", + " # no change to metadata needed\n", + "\n", + " # concat \n", + " all_accts = pd.concat([not_alloc_hold, from_alloc_hold, remainder_alloc_hold], sort=True)\n", + "\n", + " # groupby sum, splitting all_accts 
into pos & neg\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>0].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<0].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + "\n", + " else:\n", + " print(f\"Shouldn't reach this point; diff is: {diff}\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cur_upsample_avail_state_owned_historical(all_accts, juris):\n", + " \"\"\"\n", + " Takes allowances in auct_hold and assigns specific quantities to each auction within a given year.\n", + " \n", + " This isn't really an upsample, but it accomplishes the same end, of assigning quarterly quantities.\n", + " \n", + " Does this based on historical data.\n", + " \n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + "\n", + " # ~~~~~~~~~~~~~~~\n", + " # get auct_hold, state-owned, specified juris, with auct_type=='current'\n", + " # also exclude any future vintage allowances (unsold advance retained in auct_hold)\n", + " # select only \"current\" vintage == cq.date.year\n", + " # exclude any negative values in auct_hold\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = 
all_accts.index.get_level_values('juris')==juris\n", + " mask3 = all_accts.index.get_level_values('auct_type')=='current'\n", + " mask4 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask5 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " annual_avail_1v = all_accts.loc[mask] \n", + " not_annual_avail_1v = all_accts.loc[~mask]\n", + " \n", + " # ~~~~~~~~~~~~~~~\n", + " # get historical data for how much newly available in each auction (quact_mod is only newly available)\n", + " # select only \"current\" vintage == cq.date.year\n", + " df = prmt.qauct_new_avail.copy()\n", + " hist_1v = df.loc[(df.index.get_level_values('inst_cat')==juris) &\n", + " (df.index.get_level_values('auct_type')=='current') &\n", + " (df.index.get_level_values('date_level').year==cq.date.year) & \n", + " (df.index.get_level_values('vintage')==cq.date.year)]\n", + " \n", + " max_date_cur = hist_1v.index.get_level_values('date_level').max()\n", + " latest_hist_year_cur = max_date_cur.year\n", + " latest_hist_quarter_cur = max_date_cur.quarter\n", + " \n", + " # ~~~~~~~~~~~~~~~~\n", + " # create hist_proj, in which projection will be added to historical\n", + " hist_proj = hist_1v.copy()\n", + " \n", + " if cq.date.year == latest_hist_year_cur and latest_hist_quarter_cur < 4:\n", + " # special case of partial year historical data\n", + " # need to fill in additional quarters, based on what's remaining in auct_hold\n", + " \n", + " # calculate number of remaining quarters\n", + " num_remaining_q = 4 - latest_hist_quarter_cur\n", + " \n", + " # remaining after historical quantities are made available prior to cq.date\n", + " remaining_tot = annual_avail_1v['quant'].sum() - hist_1v['quant'].sum()\n", + " \n", + " # average newly available per remaining quarter\n", + " avg_remaining = remaining_tot / num_remaining_q\n", + " \n", + " latest_hist_q = 
hist_1v.loc[hist_1v.index.get_level_values('date_level')==max_date_cur]\n", + " \n", + " # create proj_quarter, with new value (avg_remaining)\n", + " proj_qs = latest_hist_q.copy()\n", + " proj_qs.at[proj_qs.index, 'quant'] = avg_remaining # will fail if latest_hist_q is > 1 row\n", + " \n", + " # iterate through projection quarters (starting 1 quarter after latest_hist_quarter_cur)\n", + " for quarter in range(latest_hist_quarter_cur+1, 4+1): \n", + " date_1q = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " proj_1q = proj_qs.copy()\n", + " mapping_dict = {'date_level': date_1q}\n", + " proj_1q = multiindex_change(proj_1q, mapping_dict)\n", + " \n", + " hist_proj = pd.concat([hist_proj, proj_1q])\n", + " \n", + " all_accts = pd.concat([hist_proj, not_annual_avail_1v], sort=True)\n", + " \n", + " else: \n", + " # reached in 2 cases: \n", + " # case 1. cq.date.year < latest_hist_year_cur\n", + " # case 2. latest_hist_year_cur == cq.date.year and latest_hist_quarter_cur == 4:\n", + " # (this fn only runs if cq.date.year <= latest_hist_year_cur)\n", + " \n", + " # there is a full year of historical data for cq.date.year; \n", + " # simply use historical data\n", + " \n", + " # if hist_1v was more than was in alloc_hold, create deficit\n", + " hist_excess = hist_1v['quant'].sum() - annual_avail_1v['quant'].sum()\n", + " if hist_excess > 1e-7:\n", + " # create deficit in alloc_hold\n", + " # take row annual_avail_1v, update value\n", + " deficit = annual_avail_1v.copy()\n", + " deficit.at[deficit.index, 'quant'] = -1 * hist_excess\n", + " \n", + " # update metadata\n", + " mapping_dict = {'status': 'deficit', \n", + " 'date_level': cq.date}\n", + " deficit = multiindex_change(deficit, mapping_dict)\n", + " \n", + " all_accts = pd.concat([hist_1v, not_annual_avail_1v, deficit], sort=True)\n", + " \n", + " else:\n", + " all_accts = pd.concat([hist_1v, not_annual_avail_1v], sort=True)\n", + " \n", + "\n", + " # clean-up; exclude fractional, zero, NaN rows\n", + " 
all_accts = all_accts.loc[(all_accts['quant']>1e-10) | (all_accts['quant']<-1e-10)]\n", + " \n", + " # check for duplicate rows; if so, groupby sum for positive rows only\n", + " dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " if dups.empty==False:\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7]\n", + " all_accts_pos = all_accts_pos.groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']>-1e-7]\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_cur__from_alloc_hold_to_auct_hold_projection(all_accts, juris):\n", + " \"\"\"\n", + " For projection, transfers all allowances remaining in alloc_hold to become state-owned current.\n", + " \n", + " Processes all allowances for a given year (with date_level year == cq.date year).\n", + " \n", + " Occurs at the start of each year (Jan 1).\n", + " \n", + " Transfers specified allowances from alloc_hold to auct_hold.\n", + " \n", + " There may already be unsold from advance auctions in auct_hold; if so, this fn does groupby sum.\n", + " \n", + " Note: There is a separate fn for making the allowances in auct_hold available.\n", + "\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = 
all_accts['quant'].sum()\n", + " \n", + " # get all allowances in alloc_hold, inst_cat=='cap', for specified vintage and juris\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='alloc_hold'\n", + " mask2 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask3 = all_accts.index.get_level_values('inst_cat')=='cap'\n", + " mask4 = all_accts.index.get_level_values('juris')==juris\n", + " mask5 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5)\n", + " \n", + " to_transfer = all_accts.loc[mask]\n", + " remainder = all_accts.loc[~mask]\n", + " \n", + " # ~~~~~~~~~~~~~~~~~~~~~~~\n", + " # for QC, set aside portion for full true-up (as initial estimated)\n", + " # first true-ups are distributed in Q3 of year after the initial allocation\n", + " if juris == 'QC':\n", + " # split to_transfer into two parts: set_aside_for_alloc and to_transfer\n", + " # will work only if:\n", + " # 1. to_transfer df is only 1 row\n", + " # 2. (cq.date.year, f\"{cq.date.year+1}Q1\") is unique in QC_alloc_full_proj\n", + " \n", + " # look up quantity that's set aside for full alloc\n", + " set_aside_for_alloc_quant = prmt.QC_alloc_full_proj.at[(cq.date.year, f\"{cq.date.year}Q1\"), 'quant']\n", + " \n", + " # create set_aside_for_alloc (df with 1 row)\n", + " set_aside_for_alloc = to_transfer.copy()\n", + " set_aside_for_alloc.at[set_aside_for_alloc.index[0], 'quant'] = set_aside_for_alloc_quant\n", + " \n", + " # update value in to_transfer\n", + " to_transfer_init = to_transfer.at[to_transfer.index[0], 'quant']\n", + " to_transfer.at[to_transfer.index[0], 'quant'] = to_transfer_init - set_aside_for_alloc_quant\n", + " \n", + " else: \n", + " # juris != 'QC'\n", + " set_aside_for_alloc = prmt.standard_MI_empty.copy()\n", + " # ~~~~~~~~~~~~~~~~~~~~~~~ \n", + " \n", + " # update metadata for state_alloc_hold, to put into auct_hold & turn into state-owned allowances\n", + " mapping_dict = {'acct_name': 'auct_hold', \n", + " 
'inst_cat': juris, \n", + " 'auct_type': 'current',\n", + " 'newness': 'new', \n", + " 'status': 'not_avail'}\n", + " to_transfer = multiindex_change(to_transfer, mapping_dict)\n", + " \n", + " all_accts = pd.concat([to_transfer, set_aside_for_alloc, remainder], sort=False) \n", + " \n", + " dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " \n", + " if dups.empty==False:\n", + " # there are duplicated indices; need to do groupby sum\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cur_upsample_avail_state_owned_projection(all_accts, juris):\n", + " \"\"\"\n", + " For current auctions in a given year, sums newly available and advance unsold; upsamples and assigns date_level.\n", + " \n", + " This is idealized method for projections, in which one-quarter of annual total is available in each auction.\n", + " \n", + " Operates on specified juris, to keep current allowances of each jurisdiction separate.\n", + " \n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " 
dups = all_accts.loc[all_accts.index.duplicated(keep=False)]\n", + " if dups.empty==False:\n", + " # there are duplicated indices; need to do groupby sum\n", + " print(\"Warning\" + \"! There are duplicated indices; need to do groupby sum.\")\n", + " all_accts_pos = all_accts.loc[all_accts['quant']>1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts_neg = all_accts.loc[all_accts['quant']<-1e-7].groupby(level=prmt.standard_MI_names).sum()\n", + " all_accts = all_accts_pos.append(all_accts_neg)\n", + " \n", + " # get all current allowances in auct_hold, for specified vintage and juris\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('juris')==juris\n", + " mask3 = all_accts.index.get_level_values('vintage')==cq.date.year\n", + " mask4 = all_accts.index.get_level_values('auct_type')=='current'\n", + " # select state-owned allowances; those with inst_cat same as juris\n", + " mask5 = all_accts.index.get_level_values('inst_cat')==juris\n", + " mask6 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4) & (mask5) & (mask6)\n", + " auct_hold = all_accts.loc[mask]\n", + " not_auct_hold = all_accts.loc[~mask]\n", + " \n", + " if prmt.run_tests == True:\n", + " # tests: check that selection above has 'status'=='not_avail' & 'newness'=='new'\n", + " if auct_hold.index.get_level_values('newness').unique().tolist() != ['new']:\n", + " print(f\"{prmt.test_failed_msg} auct_hold had entries with newness != 'new'\")\n", + " if auct_hold.index.get_level_values('status').unique().tolist() != ['not_avail']:\n", + " print(f\"{prmt.test_failed_msg} auct_hold had entries with status != 'not_avail'\")\n", + " \n", + " # take total from above, divide by 4 to get average annual quantity\n", + " each_quarter = auct_hold / 4\n", + " \n", + " # create empty df; quarterly quantities with metadata for each quarter will be put into this df\n", + " upsampled_to_q = 
prmt.standard_MI_empty.copy()\n", + " \n", + " for quarter in [1, 2, 3, 4]:\n", + " one_quarter_date = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " one_quarter = each_quarter.copy()\n", + " mapping_dict = {'date_level': one_quarter_date}\n", + " one_quarter = multiindex_change(one_quarter, mapping_dict)\n", + " upsampled_to_q = pd.concat([upsampled_to_q, one_quarter])\n", + "\n", + " all_accts = upsampled_to_q.append(not_auct_hold)\n", + " \n", + " if prmt.run_tests == True: \n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_conservation_against_full_budget(all_accts, juris, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def upsample_advance_all_accts(all_accts):\n", + " \"\"\"\n", + " Takes annual quantities set aside for advance, upsamples to get quarterly quantities to be made available.\n", + " \n", + " Specifies date_level for each quarter, but does *not* assign status 'available'.\n", + " \"\"\"\n", + "\n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # select advance allowances to be upsampled\n", + " # when cq.date.year >= 2013, \n", + " # then upsample vintage = cq.date.year + 3\n", + " \n", + " mask1 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " mask2 = all_accts.index.get_level_values('vintage')==(cq.date.year+3)\n", + " mask3 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " # specifying acct_name=='auct_hold' shouldn't be necessary, but doesn't 
hurt\n", + " mask = (mask1) & (mask2) & (mask3)\n", + " \n", + " adv_to_upsample = all_accts.loc[mask] \n", + " \n", + " each_quarter = adv_to_upsample / 4\n", + " mapping_dict = {'status': 'not_avail'}\n", + " each_quarter = multiindex_change(each_quarter, mapping_dict)\n", + " \n", + " all_quarters = prmt.standard_MI_empty.copy()\n", + " for quarter in [1, 2, 3, 4]:\n", + " one_quarter_date = quarter_period(f\"{cq.date.year}Q{quarter}\")\n", + " one_quarter = each_quarter.copy() \n", + " mapping_dict = {'date_level': one_quarter_date}\n", + " one_quarter = multiindex_change(one_quarter, mapping_dict)\n", + " all_quarters = pd.concat([all_quarters, one_quarter], sort=True)\n", + "\n", + " # recombine:\n", + " all_accts = pd.concat([all_quarters, all_accts.loc[~mask]], sort=True)\n", + " \n", + " if prmt.run_tests == True:\n", + " # TEST: conservation of allowances just for upsampled part\n", + " diff = all_quarters['quant'].sum() - adv_to_upsample['quant'].sum()\n", + " if abs(diff) > 1e-7:\n", + " print(f\"{prmt.test_failed_msg} Allowances not conserved in upsample_advance_all_accts.\")\n", + " # END OF TEST\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def adv_unsold_to_cur_all_accts(all_accts):\n", + " \"\"\"\n", + " Function similar to adv_unsold_to_cur, but operating on all_accts.\n", + " \n", + " Takes any unsold allowances from advance auctions that are in auct_hold account,\n", + " and updates metadata to change them into current auction allowances.\n", + " \n", + " Sums any unsold 
across all quarters in a calendar year, \n", + " (which will become part of total state-owned allowances to be made available in current auctions).\n", + " \n", + " Based on regulations:\n", + " CA: [CA regs Oct 2017], § 95911(f)(3)(B) & (D)\n", + " QC: [QC regs Sep 2017], Section 54 (paragraph 2)\n", + " ON: [ON regs Jan 2018], Section 58(4)2\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # isolate allowances unsold at advance auctions\n", + " mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " mask2 = all_accts.index.get_level_values('auct_type')=='advance'\n", + " mask3 = all_accts.index.get_level_values('status')=='unsold'\n", + " mask4 = all_accts['quant'] > 0\n", + " mask = (mask1) & (mask2) & (mask3) & (mask4)\n", + " unsold_adv = all_accts.loc[mask]\n", + " all_accts_remainder = all_accts.loc[~mask]\n", + " \n", + " # update metadata in all_accts, creating adv_redes_to_cur\n", + " adv_redes_to_cur = unsold_adv.copy()\n", + " mapping_dict = {'auct_type': 'current', \n", + " 'newness': 'new', \n", + " 'status': 'not_avail',\n", + " 'date_level': prmt.NaT_proxy, \n", + " 'unsold_di': prmt.NaT_proxy, \n", + " 'unsold_dl': prmt.NaT_proxy}\n", + " adv_redes_to_cur = multiindex_change(adv_redes_to_cur, mapping_dict)\n", + " \n", + " # groupby sum to combine all unsold from advance auctions of a particular vintage\n", + " adv_redes_to_cur = adv_redes_to_cur.groupby(level=prmt.standard_MI_names).sum()\n", + " \n", + " # recombine adv_redes_to_cur with remainder\n", + " all_accts = pd.concat([adv_redes_to_cur, all_accts_remainder], sort=True)\n", + "\n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, 
parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + "\n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def unsold_update_status(all_accts):\n", + " \"\"\"\n", + " Operates after auction, on any allowances still remaining in auct_hold with date_level == cq.date.\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # for unsold, update metadata:\n", + " # in all_accts, get any allowances remaining in auct_hold with date_level == cq.date\n", + " # separate them out\n", + " # change metadata: change newness to unsold & status to not_avail\n", + " # recombine with remainder of all_accts\n", + " unsold_mask1 = all_accts.index.get_level_values('acct_name')=='auct_hold'\n", + " unsold_mask2 = all_accts.index.get_level_values('date_level')==cq.date\n", + " unsold_mask3 = all_accts['quant'] > 0\n", + " unsold_mask = (unsold_mask1) & (unsold_mask2) & (unsold_mask3)\n", + " all_accts_unsold = all_accts.loc[unsold_mask]\n", + " all_accts_remainder = all_accts.loc[~unsold_mask]\n", + " \n", + " if all_accts_unsold['quant'].sum() > 0:\n", + " # update metadata in all_accts_unsold\n", + " mapping_dict = {'status': 'unsold', \n", + " 'unsold_dl': cq.date}\n", + " all_accts_unsold = multiindex_change(all_accts_unsold, mapping_dict)\n", + " \n", + " # separate those with an unsold_di != prmt.NaT_proxy from those with unsold_di == prmt.NaT_proxy\n", + " # for those with unsold_di == prmt.NaT_proxy (never unsold before), set new value of unsold_di to be cq.date; \n", + " # for the rest (were unsold before), don't change unsold_di\n", + " unsold_before_mask = 
all_accts_unsold.index.get_level_values('unsold_di')==prmt.NaT_proxy\n", + " unsold_di_NaT = all_accts_unsold.loc[unsold_before_mask]\n", + " mapping_dict = {'unsold_di': cq.date}\n", + " unsold_di_NaT = multiindex_change(unsold_di_NaT, mapping_dict)\n", + " \n", + " unsold_di_not_NaT = all_accts_unsold.loc[~unsold_before_mask]\n", + " \n", + " # recombine all_accts_remainder (above), unsold_di_NaT & unsold_di_not_NaT\n", + " all_accts = pd.concat([all_accts_remainder, \n", + " unsold_di_NaT,\n", + " unsold_di_not_NaT], sort=False)\n", + " \n", + " elif all_accts_unsold['quant'].sum() == 0.0:\n", + " pass\n", + " \n", + " else: # all_accts_unsold['quant'].sum() is neither > 0 nor == 0; is it negative? NaN?\n", + " print(\"Error\" + \"! all_accts_unsold['quant'] should be a float that's either zero or positive.\")\n", + " \n", + " if prmt.run_tests == True:\n", + " parent_fn = str(inspect.currentframe().f_code.co_name)\n", + " test_conservation_simple(all_accts, all_accts_sum_init, parent_fn)\n", + " test_for_duplicated_indices(all_accts, parent_fn)\n", + " test_for_negative_values(all_accts, parent_fn)\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (end)\")\n", + " \n", + " return(all_accts)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def transfer_cur__from_alloc_hold_to_auct_hold_new_avail_anomalies(all_accts, juris):\n", + " \"\"\"\n", + " Force unsold allowances to be reintro (made available again) to reflect anomalies in historical data.\n", + " \n", + " Right now, only set up for QC.\n", + " \n", + " Anomalies are transfers from alloc_hold to auct_hold for vintages < cq.date.year.\n", + " \n", + " (Normal transfers, in \"historical\" version of this function, are for vintage == cq.date.year.)\n", + " \n", + " Occurs at the start of each year (Jan 1).\n", + " \n", + " Transfers specified allowances from alloc_hold to auct_hold.\n", + " \n", + " Operates only for 
newly available allowances, as specified manually in input file sheet 'quarterly auct hist'.\n", + " \n", + " Operates only for state-owned allowances.\n", + " \n", + " Note: There is a separate fn for making the allowances in auct_hold available.\n", + "\n", + " \"\"\"\n", + " \n", + " logging.info(f\"{inspect.currentframe().f_code.co_name} (start)\")\n", + " \n", + " # pre-test for conservation of allowances\n", + " all_accts_sum_init = all_accts['quant'].sum()\n", + " \n", + " # get the historical data for those allowances with vintages < date_level year\n", + " df = prmt.qauct_new_avail.copy()\n", + " hist_to_be_avail_anom = df.loc[(df.index.get_level_values('inst_cat')==juris) & \n", + " (df.index.get_level_values('auct_type')=='current') &\n", + " (df.index.get_level_values('date_level')==cq.date) & \n", + " (df.index.get_level_values('vintage')