From f754e7d382ab286d66b522df1eb187e703641ee6 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 08:07:46 +0000 Subject: [PATCH 1/9] feat: Onboard COVID19 Google Mobility dataset --- .../_images/run_csv_transform_kub/Dockerfile | 38 ++++ .../run_csv_transform_kub/csv_transform.py | 146 ++++++++++++++ .../run_csv_transform_kub/requirements.txt | 3 + .../covid19_google_mobility_dataset.tf | 26 +++ .../_terraform/mobility_report_pipeline.tf | 39 ++++ .../_terraform/provider.tf | 28 +++ .../_terraform/variables.tf | 23 +++ datasets/covid19_google_mobility/dataset.yaml | 67 +++++++ .../mobility_report/mobility_report_dag.py | 176 +++++++++++++++++ .../mobility_report/pipeline.yaml | 182 ++++++++++++++++++ 10 files changed, 728 insertions(+) create mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile create mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py create mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt create mode 100644 datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf create mode 100644 datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf create mode 100644 datasets/covid19_google_mobility/_terraform/provider.tf create mode 100644 datasets/covid19_google_mobility/_terraform/variables.tf create mode 100644 datasets/covid19_google_mobility/dataset.yaml create mode 100644 datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py create mode 100644 datasets/covid19_google_mobility/mobility_report/pipeline.yaml diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile new file mode 100644 index 000000000..85af90570 --- /dev/null +++ b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile @@ -0,0 +1,38 @@ +# Copyright 2021 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The base image for this build +# FROM gcr.io/google.com/cloudsdktool/cloud-sdk:slim +FROM python:3.8 + +# Allow statements and log messages to appear in Cloud logs +ENV PYTHONUNBUFFERED True + +# Copy the requirements file into the image +COPY requirements.txt ./ + +# Install the packages specified in the requirements file +RUN python3 -m pip install --no-cache-dir -r requirements.txt + +# The WORKDIR instruction sets the working directory for any RUN, CMD, +# ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile. +# If the WORKDIR doesn’t exist, it will be created even if it’s not used in +# any subsequent Dockerfile instruction +WORKDIR /custom + +# Copy the specific data processing script/s in the image under /custom/* +COPY ./csv_transform.py . + +# Command to run the data processing script when the container is run +CMD ["python3", "csv_transform.py"] diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py new file mode 100644 index 000000000..3b52d478a --- /dev/null +++ b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py @@ -0,0 +1,146 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import json +import logging +import math +import os +import pathlib +import typing + +import pandas as pd +import requests +from google.cloud import storage + + +def main( + source_url: str, + source_file: pathlib.Path, + target_file: pathlib.Path, + target_gcs_bucket: str, + target_gcs_path: str, + headers: typing.List[str], + rename_mappings: dict, + pipeline_name: str, +) -> None: + + logging.info( + f"Austin bikeshare {pipeline_name} process started at " + + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + ) + + logging.info("creating 'files' folder") + pathlib.Path("./files").mkdir(parents=True, exist_ok=True) + + logging.info(f"Downloading file from {source_url}...") + download_file(source_url, source_file) + + logging.info(f"Opening file {source_file}...") + df = pd.read_csv(str(source_file)) + + logging.info(f"Transforming {source_file}... ") + + logging.info(f"Transform: Rename columns.. {source_file}") + rename_headers(df, rename_mappings) + + logging.info(f"Transform: converting to integer {source_file}... 
") + df["retail_and_recreation_percent_change_from_baseline"] = df[ + "retail_and_recreation_percent_change_from_baseline" + ].apply(convert_to_integer_string) + df["grocery_and_pharmacy_percent_change_from_baseline"] = df[ + "grocery_and_pharmacy_percent_change_from_baseline" + ].apply(convert_to_integer_string) + df["parks_percent_change_from_baseline"] = df[ + "parks_percent_change_from_baseline" + ].apply(convert_to_integer_string) + df["transit_stations_percent_change_from_baseline"] = df[ + "transit_stations_percent_change_from_baseline" + ].apply(convert_to_integer_string) + df["workplaces_percent_change_from_baseline"] = df[ + "workplaces_percent_change_from_baseline" + ].apply(convert_to_integer_string) + df["residential_percent_change_from_baseline"] = df[ + "residential_percent_change_from_baseline" + ].apply(convert_to_integer_string) + + logging.info("Transform: Reordering headers..") + df = df[headers] + + logging.info(f"Saving to output file.. {target_file}") + try: + save_to_new_file(df, file_path=str(target_file)) + except Exception as e: + logging.error(f"Error saving output file: {e}.") + + logging.info( + f"Uploading output file to.. 
gs://{target_gcs_bucket}/{target_gcs_path}" + ) + upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path) + + logging.info( + f"Austin bikeshare {pipeline_name} process completed at " + + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + ) + + +def convert_to_integer_string(input: typing.Union[str, float]) -> str: + str_val = "" + if not input or (math.isnan(input)): + str_val = "" + else: + str_val = str(int(round(input, 0))) + return str_val + + +def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None: + df.rename(columns=rename_mappings, inplace=True) + + +def save_to_new_file(df: pd.DataFrame, file_path: str) -> None: + df.to_csv(file_path, index=False) + + +def download_file(source_url: str, source_file: pathlib.Path) -> None: + logging.info(f"Downloading {source_url} into {source_file}") + r = requests.get(source_url, stream=True) + if r.status_code == 200: + with open(source_file, "wb") as f: + for chunk in r: + f.write(chunk) + else: + logging.error(f"Couldn't download {source_url}: {r.text}") + + +def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None: + storage_client = storage.Client() + bucket = storage_client.bucket(gcs_bucket) + blob = bucket.blob(gcs_path) + blob.upload_from_filename(file_path) + + +if __name__ == "__main__": + logging.getLogger().setLevel(logging.INFO) + + main( + source_url=os.environ["SOURCE_URL"], + source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(), + target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(), + target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"], + target_gcs_path=os.environ["TARGET_GCS_PATH"], + headers=json.loads(os.environ["CSV_HEADERS"]), + rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]), + pipeline_name=os.environ["PIPELINE_NAME"], + ) diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt new 
file mode 100644 index 000000000..f36704793 --- /dev/null +++ b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt @@ -0,0 +1,3 @@ +requests +pandas +google-cloud-storage diff --git a/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf b/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf new file mode 100644 index 000000000..186557df7 --- /dev/null +++ b/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +resource "google_bigquery_dataset" "covid19_google_mobility" { + dataset_id = "covid19_google_mobility" + project = var.project_id + description = "Terms of use\nIn order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms\n\nDescription\nThis dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential.\n\nThis dataset is intended to help remediate the impact of COVID-19. It shouldn\u2019t be used for medical diagnostic, prognostic, or treatment purposes. 
It also isn\u2019t intended to be used for guidance on personal travel plans.\n\nTo learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following:\n\n\u2022 Visit the help center: https://support.google.com/covid19-mobility.\n\n\u2022 Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html." +} + +output "bigquery_dataset-covid19_google_mobility-dataset_id" { + value = google_bigquery_dataset.covid19_google_mobility.dataset_id +} diff --git a/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf b/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf new file mode 100644 index 000000000..30414bb72 --- /dev/null +++ b/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +resource "google_bigquery_table" "mobility_report" { + project = var.project_id + dataset_id = "covid19_google_mobility" + table_id = "mobility_report" + + description = "Terms of use By downloading or using the data, you agree to Google\u0027s Terms of Service: https://policies.google.com/terms Description This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. 
It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. This dataset is intended to help remediate the impact of COVID-19. It shouldn\u2019t be used for medical diagnostic, prognostic, or treatment purposes. It also isn\u2019t intended to be used for guidance on personal travel plans. To learn more about the dataset, the place categories and how we calculate these trends and preserve privacy, read the data documentation: https://www.google.com/covid19/mobility/data_documentation.html" + + + + + depends_on = [ + google_bigquery_dataset.covid19_google_mobility + ] +} + +output "bigquery_table-mobility_report-table_id" { + value = google_bigquery_table.mobility_report.table_id +} + +output "bigquery_table-mobility_report-id" { + value = google_bigquery_table.mobility_report.id +} diff --git a/datasets/covid19_google_mobility/_terraform/provider.tf b/datasets/covid19_google_mobility/_terraform/provider.tf new file mode 100644 index 000000000..23ab87dcd --- /dev/null +++ b/datasets/covid19_google_mobility/_terraform/provider.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +provider "google" { + project = var.project_id + impersonate_service_account = var.impersonating_acct + region = var.region +} + +data "google_client_openid_userinfo" "me" {} + +output "impersonating-account" { + value = data.google_client_openid_userinfo.me.email +} diff --git a/datasets/covid19_google_mobility/_terraform/variables.tf b/datasets/covid19_google_mobility/_terraform/variables.tf new file mode 100644 index 000000000..c3ec7c506 --- /dev/null +++ b/datasets/covid19_google_mobility/_terraform/variables.tf @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +variable "project_id" {} +variable "bucket_name_prefix" {} +variable "impersonating_acct" {} +variable "region" {} +variable "env" {} + diff --git a/datasets/covid19_google_mobility/dataset.yaml b/datasets/covid19_google_mobility/dataset.yaml new file mode 100644 index 000000000..ce0ec9ba8 --- /dev/null +++ b/datasets/covid19_google_mobility/dataset.yaml @@ -0,0 +1,67 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +dataset: + # The `dataset` block includes properties for your dataset that will be shown + # to users of your data on the Google Cloud website. + + name: covid19_google_mobility + + # A friendly, human-readable name of the dataset + friendly_name: covid19_google_mobility + + # A short, descriptive summary of the dataset. + description: |- + Terms of use + In order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms + + Description + This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. + + This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. + + To learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following: + + • Visit the help center: https://support.google.com/covid19-mobility. + + • Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html. + + + dataset_sources: ~ + + terms_of_use: ~ + + +resources: + # A list of Google Cloud resources needed by your dataset. In principle, all + # pipelines under a dataset should be able to share these resources. 
+ + - type: bigquery_dataset + # Google BigQuery dataset to namespace all tables managed by this folder + + dataset_id: covid19_google_mobility + description: |- + Terms of use + In order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms + + Description + This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. + + This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. + + To learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following: + + • Visit the help center: https://support.google.com/covid19-mobility. + + • Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html. diff --git a/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py b/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py new file mode 100644 index 000000000..877154cbf --- /dev/null +++ b/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py @@ -0,0 +1,176 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from airflow import DAG +from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator + +default_args = { + "owner": "Google", + "depends_on_past": False, + "start_date": "2021-03-01", +} + + +with DAG( + dag_id="covid19_google_mobility.mobility_report", + default_args=default_args, + max_active_runs=1, + schedule_interval="@daily", + catchup=False, + default_view="graph", +) as dag: + + # Run CSV transform within kubernetes pod + mobility_report_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( + task_id="mobility_report_transform_csv", + startup_timeout_seconds=600, + name="mobility_report", + namespace="default", + affinity={ + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-nodepool", + "operator": "In", + "values": ["pool-e2-standard-4"], + } + ] + } + ] + } + } + }, + image_pull_policy="Always", + image="{{ var.json.covid19_google_mobility.container_registry.run_csv_transform_kub }}", + env_vars={ + "SOURCE_URL": "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv", + "SOURCE_FILE": "files/data.csv", + "TARGET_FILE": "files/data_output.csv", + "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_PATH": "data/covid19_google_mobility/mobility_report/data_output.csv", + "PIPELINE_NAME": "mobility_report", + "CSV_HEADERS": '["country_region_code" ,"country_region" ,"sub_region_1" ,"sub_region_2" ,"metro_area" ,"iso_3166_2_code" ,"census_fips_code" ,"place_id" ,"date" ,"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline"]', + "RENAME_MAPPINGS": 
'{"country_region_code":"country_region_code" ,"country_region":"country_region" ,"sub_region_1":"sub_region_1" ,"sub_region_2":"sub_region_2" ,"metro_area":"metro_area" ,"iso_3166_2_code":"iso_3166_2_code" ,"census_fips_code":"census_fips_code" ,"place_id":"place_id" ,"date":"date" ,"retail_and_recreation_percent_change_from_baseline":"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline":"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline":"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline":"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline":"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline":"residential_percent_change_from_baseline"}', + }, + resources={"request_memory": "2G", "request_cpu": "1"}, + ) + + # Task to load CSV data to a BigQuery table + load_mobility_report_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( + task_id="load_mobility_report_to_bq", + bucket="{{ var.json.shared.composer_bucket }}", + source_objects=["data/covid19_google_mobility/mobility_report/data_output.csv"], + source_format="CSV", + destination_project_dataset_table="covid19_google_mobility.mobility_report", + skip_leading_rows=1, + write_disposition="WRITE_TRUNCATE", + schema_fields=[ + { + "name": "country_region_code", + "type": "string", + "description": "2 letter alpha code for the country/region in which changes are measured relative to the baseline. These values correspond with the ISO 3166-1 alpha-2 codes", + "mode": "nullable", + }, + { + "name": "country_region", + "type": "string", + "description": "The country/region in which changes are measured relative to the baseline", + "mode": "nullable", + }, + { + "name": "sub_region_1", + "type": "string", + "description": "First geographic sub-region in which the data is aggregated. 
This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", + "mode": "nullable", + }, + { + "name": "sub_region_2", + "type": "string", + "description": "Second geographic sub-region in which the data is aggregated. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", + "mode": "nullable", + }, + { + "name": "metro_area", + "type": "string", + "description": "A specific metro area to measure mobility within a given city/metro area. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", + "mode": "nullable", + }, + { + "name": "iso_3166_2_code", + "type": "string", + "description": "Unique identifier for the geographic region as defined by ISO Standard 3166-2.", + "mode": "nullable", + }, + { + "name": "census_fips_code", + "type": "string", + "description": "Unique identifier for each US county as defined by the US Census Bureau. Maps to county_fips_code in other tables", + "mode": "nullable", + }, + { + "name": "place_id", + "type": "string", + "description": "A textual identifier that uniquely identifies a place in the Google Places database and on Google Maps (details). For example ChIJd_Y0eVIvkIARuQyDN0F1LBA. For details see the following link: https://developers.google.com/places/web-service/place-id", + "mode": "nullable", + }, + { + "name": "date", + "type": "date", + "description": "Changes for a given date as compared to baseline. 
Baseline is the median value for the corresponding day of the week during the 5-week period Jan 3–Feb 6 2020.", + "mode": "nullable", + }, + { + "name": "retail_and_recreation_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places like restaurants cafes shopping centers theme parks museums libraries and movie theaters.", + "mode": "nullable", + }, + { + "name": "grocery_and_pharmacy_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places like grocery markets food warehouses farmers markets specialty food shops drug stores and pharmacies.", + "mode": "nullable", + }, + { + "name": "parks_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places like local parks national parks public beaches marinas dog parks plazas and public gardens.", + "mode": "nullable", + }, + { + "name": "transit_stations_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places like public transport hubs such as subway bus and train stations.", + "mode": "nullable", + }, + { + "name": "workplaces_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places of work.", + "mode": "nullable", + }, + { + "name": "residential_percent_change_from_baseline", + "type": "integer", + "description": "Mobility trends for places of residence.", + "mode": "nullable", + }, + ], + ) + + mobility_report_transform_csv >> load_mobility_report_to_bq diff --git a/datasets/covid19_google_mobility/mobility_report/pipeline.yaml b/datasets/covid19_google_mobility/mobility_report/pipeline.yaml new file mode 100644 index 000000000..edf650445 --- /dev/null +++ b/datasets/covid19_google_mobility/mobility_report/pipeline.yaml @@ -0,0 +1,182 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +resources: + + - type: bigquery_table + # Required Properties: + table_id: mobility_report + + # Description of the table + description: "Terms of use By downloading or using the data, you agree to Google's Terms of Service: https://policies.google.com/terms Description This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. 
To learn more about the dataset, the place categories and how we calculate these trends and preserve privacy, read the data documentation: https://www.google.com/covid19/mobility/data_documentation.html" + +dag: + airflow_version: 1 + initialize: + dag_id: mobility_report + default_args: + owner: "Google" + + # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded + depends_on_past: False + start_date: "2021-03-01" + max_active_runs: 1 + schedule_interval: "@daily" + catchup: False + default_view: graph + + tasks: + - operator: "KubernetesPodOperator" + + # Task description + description: "Run CSV transform within kubernetes pod" + + args: + + task_id: "mobility_report_transform_csv" + + startup_timeout_seconds: 600 + + # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id + name: "mobility_report" + + # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. + namespace: "default" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cloud.google.com/gke-nodepool + operator: In + values: + - "pool-e2-standard-4" + + + image_pull_policy: "Always" + + # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. + image: "{{ var.json.covid19_google_mobility.container_registry.run_csv_transform_kub }}" + + # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
+ env_vars: + SOURCE_URL: "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv" + SOURCE_FILE: "files/data.csv" + TARGET_FILE: "files/data_output.csv" + TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_PATH: "data/covid19_google_mobility/mobility_report/data_output.csv" + PIPELINE_NAME: "mobility_report" + CSV_HEADERS: >- + ["country_region_code" ,"country_region" ,"sub_region_1" ,"sub_region_2" ,"metro_area" ,"iso_3166_2_code" ,"census_fips_code" ,"place_id" ,"date" ,"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline"] + RENAME_MAPPINGS: >- + {"country_region_code":"country_region_code" ,"country_region":"country_region" ,"sub_region_1":"sub_region_1" ,"sub_region_2":"sub_region_2" ,"metro_area":"metro_area" ,"iso_3166_2_code":"iso_3166_2_code" ,"census_fips_code":"census_fips_code" ,"place_id":"place_id" ,"date":"date" ,"retail_and_recreation_percent_change_from_baseline":"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline":"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline":"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline":"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline":"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline":"residential_percent_change_from_baseline"} + + + # Set resource limits for the pod here. 
For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: + request_memory: "2G" + request_cpu: "1" + + - operator: "GoogleCloudStorageToBigQueryOperator" + description: "Task to load CSV data to a BigQuery table" + + args: + task_id: "load_mobility_report_to_bq" + + # The GCS bucket where the CSV file is located in. + bucket: "{{ var.json.shared.composer_bucket }}" + + # The GCS object path for the CSV file + source_objects: ["data/covid19_google_mobility/mobility_report/data_output.csv"] + source_format: "CSV" + destination_project_dataset_table: "covid19_google_mobility.mobility_report" + + # Use this if your CSV file contains a header row + skip_leading_rows: 1 + + # How to write data to the table: overwrite, append, or write if empty + # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition + write_disposition: "WRITE_TRUNCATE" + + # The BigQuery table schema based on the CSV file. For more info, see + # https://cloud.google.com/bigquery/docs/schemas. + # Always use snake_case and lowercase for column names, and be explicit, + # i.e. specify modes for all columns. + + schema_fields: + - name: "country_region_code" + type: "string" + description: "2 letter alpha code for the country/region in which changes are measured relative to the baseline. These values correspond with the ISO 3166-1 alpha-2 codes" + mode: "nullable" + - name: "country_region" + type: "string" + description: "The country/region in which changes are measured relative to the baseline" + mode: "nullable" + - name: "sub_region_1" + type: "string" + description: "First geographic sub-region in which the data is aggregated. 
This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" + mode: "nullable" + - name: "sub_region_2" + type: "string" + description: "Second geographic sub-region in which the data is aggregated. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" + mode: "nullable" + - name: "metro_area" + type: "string" + description: "A specific metro area to measure mobility within a given city/metro area. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" + mode: "nullable" + - name: "iso_3166_2_code" + type: "string" + description: "Unique identifier for the geographic region as defined by ISO Standard 3166-2." + mode: "nullable" + - name: "census_fips_code" + type: "string" + description: "Unique identifier for each US county as defined by the US Census Bureau. Maps to county_fips_code in other tables" + mode: "nullable" + - name: "place_id" + type: "string" + description: "A textual identifier that uniquely identifies a place in the Google Places database and on Google Maps (details). For example ChIJd_Y0eVIvkIARuQyDN0F1LBA. For details see the following link: https://developers.google.com/places/web-service/place-id" + mode: "nullable" + - name: "date" + type: "date" + description: "Changes for a given date as compared to baseline. Baseline is the median value for the corresponding day of the week during the 5-week period Jan 3–Feb 6 2020." + mode: "nullable" + - name: "retail_and_recreation_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places like restaurants cafes shopping centers theme parks museums libraries and movie theaters." 
+ mode: "nullable" + - name: "grocery_and_pharmacy_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places like grocery markets food warehouses farmers markets specialty food shops drug stores and pharmacies." + mode: "nullable" + - name: "parks_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places like local parks national parks public beaches marinas dog parks plazas and public gardens." + mode: "nullable" + - name: "transit_stations_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places like public transport hubs such as subway bus and train stations." + mode: "nullable" + - name: "workplaces_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places of work." + mode: "nullable" + - name: "residential_percent_change_from_baseline" + type: "integer" + description: "Mobility trends for places of residence." + mode: "nullable" + + graph_paths: + - "mobility_report_transform_csv >> load_mobility_report_to_bq" From 8961adc11254e1d1252a9e88764bc0ea4f735585 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 08:12:09 +0000 Subject: [PATCH 2/9] feat: Onboard COVID19 Google Mobility dataset --- .../_images/run_csv_transform_kub/csv_transform.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py index 3b52d478a..02bf40cb8 100644 --- a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py +++ b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py @@ -38,7 +38,7 @@ def main( ) -> None: logging.info( - f"Austin bikeshare {pipeline_name} process started at " + f"COVID19 Google mobility {pipeline_name} process started at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) ) @@ 
-91,7 +91,7 @@ def main( upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path) logging.info( - f"Austin bikeshare {pipeline_name} process completed at " + f"COVID19 Google mobility {pipeline_name} process completed at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) ) From f202e49a6e73ca1aceab22f02f78a2f6cbc693cc Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 08:28:28 +0000 Subject: [PATCH 3/9] chore: clean up --- .../_images/run_csv_transform_kub/Dockerfile | 38 ---- .../run_csv_transform_kub/csv_transform.py | 146 -------------- .../run_csv_transform_kub/requirements.txt | 3 - .../covid19_google_mobility_dataset.tf | 26 --- .../_terraform/mobility_report_pipeline.tf | 39 ---- .../_terraform/provider.tf | 28 --- .../_terraform/variables.tf | 23 --- datasets/covid19_google_mobility/dataset.yaml | 67 ------- .../mobility_report/mobility_report_dag.py | 176 ----------------- .../mobility_report/pipeline.yaml | 182 ------------------ 10 files changed, 728 deletions(-) delete mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile delete mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py delete mode 100644 datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt delete mode 100644 datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf delete mode 100644 datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf delete mode 100644 datasets/covid19_google_mobility/_terraform/provider.tf delete mode 100644 datasets/covid19_google_mobility/_terraform/variables.tf delete mode 100644 datasets/covid19_google_mobility/dataset.yaml delete mode 100644 datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py delete mode 100644 datasets/covid19_google_mobility/mobility_report/pipeline.yaml diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile 
b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile deleted file mode 100644 index 85af90570..000000000 --- a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The base image for this build -# FROM gcr.io/google.com/cloudsdktool/cloud-sdk:slim -FROM python:3.8 - -# Allow statements and log messages to appear in Cloud logs -ENV PYTHONUNBUFFERED True - -# Copy the requirements file into the image -COPY requirements.txt ./ - -# Install the packages specified in the requirements file -RUN python3 -m pip install --no-cache-dir -r requirements.txt - -# The WORKDIR instruction sets the working directory for any RUN, CMD, -# ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile. -# If the WORKDIR doesn’t exist, it will be created even if it’s not used in -# any subsequent Dockerfile instruction -WORKDIR /custom - -# Copy the specific data processing script/s in the image under /custom/* -COPY ./csv_transform.py . 
- -# Command to run the data processing script when the container is run -CMD ["python3", "csv_transform.py"] diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py deleted file mode 100644 index 02bf40cb8..000000000 --- a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/csv_transform.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import datetime -import json -import logging -import math -import os -import pathlib -import typing - -import pandas as pd -import requests -from google.cloud import storage - - -def main( - source_url: str, - source_file: pathlib.Path, - target_file: pathlib.Path, - target_gcs_bucket: str, - target_gcs_path: str, - headers: typing.List[str], - rename_mappings: dict, - pipeline_name: str, -) -> None: - - logging.info( - f"COVID19 Google mobility {pipeline_name} process started at " - + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) - ) - - logging.info("creating 'files' folder") - pathlib.Path("./files").mkdir(parents=True, exist_ok=True) - - logging.info(f"Downloading file from {source_url}...") - download_file(source_url, source_file) - - logging.info(f"Opening file {source_file}...") - df = pd.read_csv(str(source_file)) - - logging.info(f"Transforming {source_file}... ") - - logging.info(f"Transform: Rename columns.. 
{source_file}") - rename_headers(df, rename_mappings) - - logging.info(f"Transform: converting to integer {source_file}... ") - df["retail_and_recreation_percent_change_from_baseline"] = df[ - "retail_and_recreation_percent_change_from_baseline" - ].apply(convert_to_integer_string) - df["grocery_and_pharmacy_percent_change_from_baseline"] = df[ - "grocery_and_pharmacy_percent_change_from_baseline" - ].apply(convert_to_integer_string) - df["parks_percent_change_from_baseline"] = df[ - "parks_percent_change_from_baseline" - ].apply(convert_to_integer_string) - df["transit_stations_percent_change_from_baseline"] = df[ - "transit_stations_percent_change_from_baseline" - ].apply(convert_to_integer_string) - df["workplaces_percent_change_from_baseline"] = df[ - "workplaces_percent_change_from_baseline" - ].apply(convert_to_integer_string) - df["residential_percent_change_from_baseline"] = df[ - "residential_percent_change_from_baseline" - ].apply(convert_to_integer_string) - - logging.info("Transform: Reordering headers..") - df = df[headers] - - logging.info(f"Saving to output file.. {target_file}") - try: - save_to_new_file(df, file_path=str(target_file)) - except Exception as e: - logging.error(f"Error saving output file: {e}.") - - logging.info( - f"Uploading output file to.. 
gs://{target_gcs_bucket}/{target_gcs_path}" - ) - upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path) - - logging.info( - f"COVID19 Google mobility {pipeline_name} process completed at " - + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) - ) - - -def convert_to_integer_string(input: typing.Union[str, float]) -> str: - str_val = "" - if not input or (math.isnan(input)): - str_val = "" - else: - str_val = str(int(round(input, 0))) - return str_val - - -def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None: - df.rename(columns=rename_mappings, inplace=True) - - -def save_to_new_file(df: pd.DataFrame, file_path: str) -> None: - df.to_csv(file_path, index=False) - - -def download_file(source_url: str, source_file: pathlib.Path) -> None: - logging.info(f"Downloading {source_url} into {source_file}") - r = requests.get(source_url, stream=True) - if r.status_code == 200: - with open(source_file, "wb") as f: - for chunk in r: - f.write(chunk) - else: - logging.error(f"Couldn't download {source_url}: {r.text}") - - -def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None: - storage_client = storage.Client() - bucket = storage_client.bucket(gcs_bucket) - blob = bucket.blob(gcs_path) - blob.upload_from_filename(file_path) - - -if __name__ == "__main__": - logging.getLogger().setLevel(logging.INFO) - - main( - source_url=os.environ["SOURCE_URL"], - source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(), - target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(), - target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"], - target_gcs_path=os.environ["TARGET_GCS_PATH"], - headers=json.loads(os.environ["CSV_HEADERS"]), - rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]), - pipeline_name=os.environ["PIPELINE_NAME"], - ) diff --git a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt 
b/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt deleted file mode 100644 index f36704793..000000000 --- a/datasets/covid19_google_mobility/_images/run_csv_transform_kub/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -requests -pandas -google-cloud-storage diff --git a/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf b/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf deleted file mode 100644 index 186557df7..000000000 --- a/datasets/covid19_google_mobility/_terraform/covid19_google_mobility_dataset.tf +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -resource "google_bigquery_dataset" "covid19_google_mobility" { - dataset_id = "covid19_google_mobility" - project = var.project_id - description = "Terms of use\nIn order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms\n\nDescription\nThis dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential.\n\nThis dataset is intended to help remediate the impact of COVID-19. 
It shouldn\u2019t be used for medical diagnostic, prognostic, or treatment purposes. It also isn\u2019t intended to be used for guidance on personal travel plans.\n\nTo learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following:\n\n\u2022 Visit the help center: https://support.google.com/covid19-mobility.\n\n\u2022 Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html." -} - -output "bigquery_dataset-covid19_google_mobility-dataset_id" { - value = google_bigquery_dataset.covid19_google_mobility.dataset_id -} diff --git a/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf b/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf deleted file mode 100644 index 30414bb72..000000000 --- a/datasets/covid19_google_mobility/_terraform/mobility_report_pipeline.tf +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -resource "google_bigquery_table" "mobility_report" { - project = var.project_id - dataset_id = "covid19_google_mobility" - table_id = "mobility_report" - - description = "Terms of use By downloading or using the data, you agree to Google\u0027s Terms of Service: https://policies.google.com/terms Description This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. 
It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. This dataset is intended to help remediate the impact of COVID-19. It shouldn\u2019t be used for medical diagnostic, prognostic, or treatment purposes. It also isn\u2019t intended to be used for guidance on personal travel plans. To learn more about the dataset, the place categories and how we calculate these trends and preserve privacy, read the data documentation: https://www.google.com/covid19/mobility/data_documentation.html" - - - - - depends_on = [ - google_bigquery_dataset.covid19_google_mobility - ] -} - -output "bigquery_table-mobility_report-table_id" { - value = google_bigquery_table.mobility_report.table_id -} - -output "bigquery_table-mobility_report-id" { - value = google_bigquery_table.mobility_report.id -} diff --git a/datasets/covid19_google_mobility/_terraform/provider.tf b/datasets/covid19_google_mobility/_terraform/provider.tf deleted file mode 100644 index 23ab87dcd..000000000 --- a/datasets/covid19_google_mobility/_terraform/provider.tf +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -provider "google" { - project = var.project_id - impersonate_service_account = var.impersonating_acct - region = var.region -} - -data "google_client_openid_userinfo" "me" {} - -output "impersonating-account" { - value = data.google_client_openid_userinfo.me.email -} diff --git a/datasets/covid19_google_mobility/_terraform/variables.tf b/datasets/covid19_google_mobility/_terraform/variables.tf deleted file mode 100644 index c3ec7c506..000000000 --- a/datasets/covid19_google_mobility/_terraform/variables.tf +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -variable "project_id" {} -variable "bucket_name_prefix" {} -variable "impersonating_acct" {} -variable "region" {} -variable "env" {} - diff --git a/datasets/covid19_google_mobility/dataset.yaml b/datasets/covid19_google_mobility/dataset.yaml deleted file mode 100644 index ce0ec9ba8..000000000 --- a/datasets/covid19_google_mobility/dataset.yaml +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -dataset: - # The `dataset` block includes properties for your dataset that will be shown - # to users of your data on the Google Cloud website. - - name: covid19_google_mobility - - # A friendly, human-readable name of the dataset - friendly_name: covid19_google_mobility - - # A short, descriptive summary of the dataset. - description: |- - Terms of use - In order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms - - Description - This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. - - This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. - - To learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following: - - • Visit the help center: https://support.google.com/covid19-mobility. - - • Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html. - - - dataset_sources: ~ - - terms_of_use: ~ - - -resources: - # A list of Google Cloud resources needed by your dataset. In principle, all - # pipelines under a dataset should be able to share these resources. 
- - - type: bigquery_dataset - # Google BigQuery dataset to namespace all tables managed by this folder - - dataset_id: covid19_google_mobility - description: |- - Terms of use - In order to download or use the data or reports, you must agree to the Google Terms of Service: https://policies.google.com/terms - - Description - This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. - - This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. - - To learn more about the dataset, the place categories, and how we calculate these trends and preserve privacy, do the following: - - • Visit the help center: https://support.google.com/covid19-mobility. - - • Or, read the dataset documentation: https://www.google.com/covid19/mobility/data_documentation.html. diff --git a/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py b/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py deleted file mode 100644 index 877154cbf..000000000 --- a/datasets/covid19_google_mobility/mobility_report/mobility_report_dag.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -from airflow import DAG -from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator - -default_args = { - "owner": "Google", - "depends_on_past": False, - "start_date": "2021-03-01", -} - - -with DAG( - dag_id="covid19_google_mobility.mobility_report", - default_args=default_args, - max_active_runs=1, - schedule_interval="@daily", - catchup=False, - default_view="graph", -) as dag: - - # Run CSV transform within kubernetes pod - mobility_report_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( - task_id="mobility_report_transform_csv", - startup_timeout_seconds=600, - name="mobility_report", - namespace="default", - affinity={ - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "cloud.google.com/gke-nodepool", - "operator": "In", - "values": ["pool-e2-standard-4"], - } - ] - } - ] - } - } - }, - image_pull_policy="Always", - image="{{ var.json.covid19_google_mobility.container_registry.run_csv_transform_kub }}", - env_vars={ - "SOURCE_URL": "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv", - "SOURCE_FILE": "files/data.csv", - "TARGET_FILE": "files/data_output.csv", - "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", - "TARGET_GCS_PATH": "data/covid19_google_mobility/mobility_report/data_output.csv", - "PIPELINE_NAME": "mobility_report", - "CSV_HEADERS": '["country_region_code" ,"country_region" ,"sub_region_1" ,"sub_region_2" ,"metro_area" ,"iso_3166_2_code" ,"census_fips_code" ,"place_id" ,"date" ,"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline"]', - "RENAME_MAPPINGS": 
'{"country_region_code":"country_region_code" ,"country_region":"country_region" ,"sub_region_1":"sub_region_1" ,"sub_region_2":"sub_region_2" ,"metro_area":"metro_area" ,"iso_3166_2_code":"iso_3166_2_code" ,"census_fips_code":"census_fips_code" ,"place_id":"place_id" ,"date":"date" ,"retail_and_recreation_percent_change_from_baseline":"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline":"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline":"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline":"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline":"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline":"residential_percent_change_from_baseline"}', - }, - resources={"request_memory": "2G", "request_cpu": "1"}, - ) - - # Task to load CSV data to a BigQuery table - load_mobility_report_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( - task_id="load_mobility_report_to_bq", - bucket="{{ var.json.shared.composer_bucket }}", - source_objects=["data/covid19_google_mobility/mobility_report/data_output.csv"], - source_format="CSV", - destination_project_dataset_table="covid19_google_mobility.mobility_report", - skip_leading_rows=1, - write_disposition="WRITE_TRUNCATE", - schema_fields=[ - { - "name": "country_region_code", - "type": "string", - "description": "2 letter alpha code for the country/region in which changes are measured relative to the baseline. These values correspond with the ISO 3166-1 alpha-2 codes", - "mode": "nullable", - }, - { - "name": "country_region", - "type": "string", - "description": "The country/region in which changes are measured relative to the baseline", - "mode": "nullable", - }, - { - "name": "sub_region_1", - "type": "string", - "description": "First geographic sub-region in which the data is aggregated. 
This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", - "mode": "nullable", - }, - { - "name": "sub_region_2", - "type": "string", - "description": "Second geographic sub-region in which the data is aggregated. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", - "mode": "nullable", - }, - { - "name": "metro_area", - "type": "string", - "description": "A specific metro area to measure mobility within a given city/metro area. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities", - "mode": "nullable", - }, - { - "name": "iso_3166_2_code", - "type": "string", - "description": "Unique identifier for the geographic region as defined by ISO Standard 3166-2.", - "mode": "nullable", - }, - { - "name": "census_fips_code", - "type": "string", - "description": "Unique identifier for each US county as defined by the US Census Bureau. Maps to county_fips_code in other tables", - "mode": "nullable", - }, - { - "name": "place_id", - "type": "string", - "description": "A textual identifier that uniquely identifies a place in the Google Places database and on Google Maps (details). For example ChIJd_Y0eVIvkIARuQyDN0F1LBA. For details see the following link: https://developers.google.com/places/web-service/place-id", - "mode": "nullable", - }, - { - "name": "date", - "type": "date", - "description": "Changes for a given date as compared to baseline. 
Baseline is the median value for the corresponding day of the week during the 5-week period Jan 3–Feb 6 2020.", - "mode": "nullable", - }, - { - "name": "retail_and_recreation_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places like restaurants cafes shopping centers theme parks museums libraries and movie theaters.", - "mode": "nullable", - }, - { - "name": "grocery_and_pharmacy_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places like grocery markets food warehouses farmers markets specialty food shops drug stores and pharmacies.", - "mode": "nullable", - }, - { - "name": "parks_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places like local parks national parks public beaches marinas dog parks plazas and public gardens.", - "mode": "nullable", - }, - { - "name": "transit_stations_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places like public transport hubs such as subway bus and train stations.", - "mode": "nullable", - }, - { - "name": "workplaces_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places of work.", - "mode": "nullable", - }, - { - "name": "residential_percent_change_from_baseline", - "type": "integer", - "description": "Mobility trends for places of residence.", - "mode": "nullable", - }, - ], - ) - - mobility_report_transform_csv >> load_mobility_report_to_bq diff --git a/datasets/covid19_google_mobility/mobility_report/pipeline.yaml b/datasets/covid19_google_mobility/mobility_report/pipeline.yaml deleted file mode 100644 index edf650445..000000000 --- a/datasets/covid19_google_mobility/mobility_report/pipeline.yaml +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -resources: - - - type: bigquery_table - # Required Properties: - table_id: mobility_report - - # Description of the table - description: "Terms of use By downloading or using the data, you agree to Google's Terms of Service: https://policies.google.com/terms Description This dataset aims to provide insights into what has changed in response to policies aimed at combating COVID-19. It reports movement trends over time by geography, across different categories of places such as retail and recreation, groceries and pharmacies, parks, transit stations, workplaces, and residential. This dataset is intended to help remediate the impact of COVID-19. It shouldn’t be used for medical diagnostic, prognostic, or treatment purposes. It also isn’t intended to be used for guidance on personal travel plans. 
To learn more about the dataset, the place categories and how we calculate these trends and preserve privacy, read the data documentation: https://www.google.com/covid19/mobility/data_documentation.html" - -dag: - airflow_version: 1 - initialize: - dag_id: mobility_report - default_args: - owner: "Google" - - # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded - depends_on_past: False - start_date: "2021-03-01" - max_active_runs: 1 - schedule_interval: "@daily" - catchup: False - default_view: graph - - tasks: - - operator: "KubernetesPodOperator" - - # Task description - description: "Run CSV transform within kubernetes pod" - - args: - - task_id: "mobility_report_transform_csv" - - startup_timeout_seconds: 600 - - # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id - name: "mobility_report" - - # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. - namespace: "default" - - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: cloud.google.com/gke-nodepool - operator: In - values: - - "pool-e2-standard-4" - - - image_pull_policy: "Always" - - # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. - image: "{{ var.json.covid19_google_mobility.container_registry.run_csv_transform_kub }}" - - # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
- env_vars: - SOURCE_URL: "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv" - SOURCE_FILE: "files/data.csv" - TARGET_FILE: "files/data_output.csv" - TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" - TARGET_GCS_PATH: "data/covid19_google_mobility/mobility_report/data_output.csv" - PIPELINE_NAME: "mobility_report" - CSV_HEADERS: >- - ["country_region_code" ,"country_region" ,"sub_region_1" ,"sub_region_2" ,"metro_area" ,"iso_3166_2_code" ,"census_fips_code" ,"place_id" ,"date" ,"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline"] - RENAME_MAPPINGS: >- - {"country_region_code":"country_region_code" ,"country_region":"country_region" ,"sub_region_1":"sub_region_1" ,"sub_region_2":"sub_region_2" ,"metro_area":"metro_area" ,"iso_3166_2_code":"iso_3166_2_code" ,"census_fips_code":"census_fips_code" ,"place_id":"place_id" ,"date":"date" ,"retail_and_recreation_percent_change_from_baseline":"retail_and_recreation_percent_change_from_baseline" ,"grocery_and_pharmacy_percent_change_from_baseline":"grocery_and_pharmacy_percent_change_from_baseline" ,"parks_percent_change_from_baseline":"parks_percent_change_from_baseline" ,"transit_stations_percent_change_from_baseline":"transit_stations_percent_change_from_baseline" ,"workplaces_percent_change_from_baseline":"workplaces_percent_change_from_baseline" ,"residential_percent_change_from_baseline":"residential_percent_change_from_baseline"} - - - # Set resource limits for the pod here. 
For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes - resources: - request_memory: "2G" - request_cpu: "1" - - - operator: "GoogleCloudStorageToBigQueryOperator" - description: "Task to load CSV data to a BigQuery table" - - args: - task_id: "load_mobility_report_to_bq" - - # The GCS bucket where the CSV file is located in. - bucket: "{{ var.json.shared.composer_bucket }}" - - # The GCS object path for the CSV file - source_objects: ["data/covid19_google_mobility/mobility_report/data_output.csv"] - source_format: "CSV" - destination_project_dataset_table: "covid19_google_mobility.mobility_report" - - # Use this if your CSV file contains a header row - skip_leading_rows: 1 - - # How to write data to the table: overwrite, append, or write if empty - # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition - write_disposition: "WRITE_TRUNCATE" - - # The BigQuery table schema based on the CSV file. For more info, see - # https://cloud.google.com/bigquery/docs/schemas. - # Always use snake_case and lowercase for column names, and be explicit, - # i.e. specify modes for all columns. - - schema_fields: - - name: "country_region_code" - type: "string" - description: "2 letter alpha code for the country/region in which changes are measured relative to the baseline. These values correspond with the ISO 3166-1 alpha-2 codes" - mode: "nullable" - - name: "country_region" - type: "string" - description: "The country/region in which changes are measured relative to the baseline" - mode: "nullable" - - name: "sub_region_1" - type: "string" - description: "First geographic sub-region in which the data is aggregated. 
This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" - mode: "nullable" - - name: "sub_region_2" - type: "string" - description: "Second geographic sub-region in which the data is aggregated. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" - mode: "nullable" - - name: "metro_area" - type: "string" - description: "A specific metro area to measure mobility within a given city/metro area. This varies by country/region to ensure privacy and public health value in consultation with local public health authorities" - mode: "nullable" - - name: "iso_3166_2_code" - type: "string" - description: "Unique identifier for the geographic region as defined by ISO Standard 3166-2." - mode: "nullable" - - name: "census_fips_code" - type: "string" - description: "Unique identifier for each US county as defined by the US Census Bureau. Maps to county_fips_code in other tables" - mode: "nullable" - - name: "place_id" - type: "string" - description: "A textual identifier that uniquely identifies a place in the Google Places database and on Google Maps (details). For example ChIJd_Y0eVIvkIARuQyDN0F1LBA. For details see the following link: https://developers.google.com/places/web-service/place-id" - mode: "nullable" - - name: "date" - type: "date" - description: "Changes for a given date as compared to baseline. Baseline is the median value for the corresponding day of the week during the 5-week period Jan 3–Feb 6 2020." - mode: "nullable" - - name: "retail_and_recreation_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places like restaurants cafes shopping centers theme parks museums libraries and movie theaters." 
- mode: "nullable" - - name: "grocery_and_pharmacy_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places like grocery markets food warehouses farmers markets specialty food shops drug stores and pharmacies." - mode: "nullable" - - name: "parks_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places like local parks national parks public beaches marinas dog parks plazas and public gardens." - mode: "nullable" - - name: "transit_stations_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places like public transport hubs such as subway bus and train stations." - mode: "nullable" - - name: "workplaces_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places of work." - mode: "nullable" - - name: "residential_percent_change_from_baseline" - type: "integer" - description: "Mobility trends for places of residence." - mode: "nullable" - - graph_paths: - - "mobility_report_transform_csv >> load_mobility_report_to_bq" From c0afb260ccb5194715df99205853c26f6e966662 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 09:39:59 +0000 Subject: [PATCH 4/9] feat: Onboard World Bank Intl Debt dataset --- .../_images/run_csv_transform_kub/Dockerfile | 55 +++++ .../run_csv_transform_kub/csv_transform.py | 158 +++++++++++++ .../run_csv_transform_kub/requirements.txt | 3 + .../country_series_definitions_pipeline.tf | 39 ++++ .../_terraform/country_summary_pipeline.tf | 39 ++++ .../_terraform/provider.tf | 28 +++ .../_terraform/series_summary_pipeline.tf | 39 ++++ .../_terraform/series_times_pipeline.tf | 39 ++++ .../_terraform/variables.tf | 23 ++ .../world_bank_intl_debt_dataset.tf | 26 +++ .../country_series_definitions_dag.py | 92 ++++++++ .../country_series_definitions/pipeline.yaml | 131 +++++++++++ .../country_summary/country_summary_dag.py | 167 ++++++++++++++ .../country_summary/pipeline.yaml | 216 ++++++++++++++++++ 
datasets/world_bank_intl_debt/dataset.yaml | 43 ++++ .../series_summary/pipeline.yaml | 183 +++++++++++++++ .../series_summary/series_summary_dag.py | 120 ++++++++++ .../series_times/pipeline.yaml | 131 +++++++++++ .../series_times/series_times_dag.py | 90 ++++++++ 19 files changed, 1622 insertions(+) create mode 100644 datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile create mode 100644 datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py create mode 100644 datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt create mode 100644 datasets/world_bank_intl_debt/_terraform/country_series_definitions_pipeline.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/country_summary_pipeline.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/provider.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/series_summary_pipeline.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/series_times_pipeline.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/variables.tf create mode 100644 datasets/world_bank_intl_debt/_terraform/world_bank_intl_debt_dataset.tf create mode 100644 datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py create mode 100644 datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml create mode 100644 datasets/world_bank_intl_debt/country_summary/country_summary_dag.py create mode 100644 datasets/world_bank_intl_debt/country_summary/pipeline.yaml create mode 100644 datasets/world_bank_intl_debt/dataset.yaml create mode 100644 datasets/world_bank_intl_debt/series_summary/pipeline.yaml create mode 100644 datasets/world_bank_intl_debt/series_summary/series_summary_dag.py create mode 100644 datasets/world_bank_intl_debt/series_times/pipeline.yaml create mode 100644 datasets/world_bank_intl_debt/series_times/series_times_dag.py diff --git 
a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile new file mode 100644 index 000000000..154e953cb --- /dev/null +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile @@ -0,0 +1,55 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The base image for this build +# FROM gcr.io/google.com/cloudsdktool/cloud-sdk:352.0.0-slim +# FROM python:3.8 +FROM python:3.8 + +# Allow statements and log messages to appear in Cloud logs +ENV PYTHONUNBUFFERED True + +RUN apt-get -y update && apt-get install -y apt-transport-https ca-certificates gnupg &&\ + echo "deb https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list &&\ + # echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - &&\ + apt-get -y update && apt-get install -y google-cloud-sdk + + # echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ + # curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + # apt-get update && apt-get install -y google-cloud-sdk=${CLOUD_SDK_VERSION}-0 $INSTALL_COMPONENTS && \ + + + +# echo "deb 
https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ +# apt-get update && apt-get install apt-transport-https ca-certificates gnupg && \ +# curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ +# apt-get update && sudo apt-get install google-cloud-sdk +# Copy the requirements file into the image +COPY requirements.txt ./ + +# Install the packages specified in the requirements file +RUN python3 -m pip install --no-cache-dir -r requirements.txt + +# The WORKDIR instruction sets the working directory for any RUN, CMD, +# ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile. +# If the WORKDIR doesn’t exist, it will be created even if it’s not used in +# any subsequent Dockerfile instruction +WORKDIR /custom + +# Copy the specific data processing script/s in the image under /custom/* +COPY ./csv_transform.py . + +# Command to run the data processing script when the container is run +CMD ["python3", "csv_transform.py"] \ No newline at end of file diff --git a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py new file mode 100644 index 000000000..63b4a51c5 --- /dev/null +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py @@ -0,0 +1,158 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import json +import logging +import math +import os +import pathlib +import subprocess +import typing + +import pandas as pd +from google.cloud import storage + + +def main( + source_url: str, + source_file: pathlib.Path, + column_name: str, + target_file: pathlib.Path, + target_gcs_bucket: str, + target_gcs_path: str, + headers: typing.List[str], + rename_mappings: dict, + pipeline_name: str, +) -> None: + + logging.info( + f"World Bank Intl Debt {pipeline_name} process started at " + + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + ) + + logging.info("creating 'files' folder") + pathlib.Path("./files").mkdir(parents=True, exist_ok=True) + + logging.info(f"Downloading file {source_url}") + download_file(source_url, source_file) + + logging.info(f"Opening file {source_file}") + df = pd.read_csv(source_file, skip_blank_lines=True) + + logging.info(f"Transforming {source_file} ... ") + + # print(df.columns) + + logging.info(f"Transform: Dropping column {column_name} ...") + delete_column(df, column_name) + + logging.info(f"Transform: Renaming columns for {pipeline_name} ...") + rename_headers(df, rename_mappings) + + if pipeline_name == "series_times": + logging.info(f"Transform: Extracting year for {pipeline_name} ...") + df["year"] = df["year"].apply(extract_year) + else: + df = df + + if pipeline_name == "country_summary": + logging.info("Transform: Creating a new column ...") + df["latest_water_withdrawal_data"] = "" + + logging.info("Transform: converting to integer ... 
") + df["latest_industrial_data"] = df["latest_industrial_data"].apply( + convert_to_integer_string + ) + df["latest_trade_data"] = df["latest_trade_data"].apply( + convert_to_integer_string + ) + + else: + df = df + + logging.info(f"Transform: Reordering headers for {pipeline_name} ...") + df = df[headers] + + # print(df.head) + + logging.info(f"Saving to output file.. {target_file}") + try: + save_to_new_file(df, file_path=str(target_file)) + except Exception as e: + logging.error(f"Error saving output file: {e}.") + + logging.info( + f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}" + ) + upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path) + + logging.info( + f"World Bank Intl Debt {pipeline_name} process completed at " + + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + ) + + +def download_file(source_url: str, source_file: pathlib.Path) -> None: + subprocess.check_call(["gsutil", "cp", f"{source_url}", f"{source_file}"]) + + +def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None: + df.rename(columns=rename_mappings, inplace=True) + + +def delete_column(df: pd.DataFrame, column_name: str) -> None: + df = df.drop(column_name, axis=1, inplace=True) + + +def extract_year(string_val: str) -> str: + string_val = string_val[2:] + return string_val + + +def save_to_new_file(df: pd.DataFrame, file_path: str) -> None: + df.to_csv(file_path, index=False) + + +def convert_to_integer_string(input: typing.Union[str, float]) -> str: + str_val = "" + if not input or (math.isnan(input)): + str_val = "" + else: + str_val = str(int(round(input, 0))) + return str_val + + +def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None: + storage_client = storage.Client() + bucket = storage_client.bucket(gcs_bucket) + blob = bucket.blob(gcs_path) + blob.upload_from_filename(file_path) + + +if __name__ == "__main__": + logging.getLogger().setLevel(logging.INFO) + + main( + 
source_url=os.environ["SOURCE_URL"], + source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(), + column_name=os.environ["COLUMN_TO_REMOVE"], + target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(), + target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"], + target_gcs_path=os.environ["TARGET_GCS_PATH"], + headers=json.loads(os.environ["CSV_HEADERS"]), + rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]), + pipeline_name=os.environ["PIPELINE_NAME"], + ) diff --git a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt new file mode 100644 index 000000000..f36704793 --- /dev/null +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt @@ -0,0 +1,3 @@ +requests +pandas +google-cloud-storage diff --git a/datasets/world_bank_intl_debt/_terraform/country_series_definitions_pipeline.tf b/datasets/world_bank_intl_debt/_terraform/country_series_definitions_pipeline.tf new file mode 100644 index 000000000..5fd062ed4 --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/country_series_definitions_pipeline.tf @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +resource "google_bigquery_table" "country_series_definitions" { + project = var.project_id + dataset_id = "world_bank_intl_debt" + table_id = "country_series_definitions" + + description = "Country Series Definition table" + + + + + depends_on = [ + google_bigquery_dataset.world_bank_intl_debt + ] +} + +output "bigquery_table-country_series_definitions-table_id" { + value = google_bigquery_table.country_series_definitions.table_id +} + +output "bigquery_table-country_series_definitions-id" { + value = google_bigquery_table.country_series_definitions.id +} diff --git a/datasets/world_bank_intl_debt/_terraform/country_summary_pipeline.tf b/datasets/world_bank_intl_debt/_terraform/country_summary_pipeline.tf new file mode 100644 index 000000000..919f75e33 --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/country_summary_pipeline.tf @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +resource "google_bigquery_table" "country_summary" { + project = var.project_id + dataset_id = "world_bank_intl_debt" + table_id = "country_summary" + + description = "Country Summary table" + + + + + depends_on = [ + google_bigquery_dataset.world_bank_intl_debt + ] +} + +output "bigquery_table-country_summary-table_id" { + value = google_bigquery_table.country_summary.table_id +} + +output "bigquery_table-country_summary-id" { + value = google_bigquery_table.country_summary.id +} diff --git a/datasets/world_bank_intl_debt/_terraform/provider.tf b/datasets/world_bank_intl_debt/_terraform/provider.tf new file mode 100644 index 000000000..23ab87dcd --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/provider.tf @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +provider "google" { + project = var.project_id + impersonate_service_account = var.impersonating_acct + region = var.region +} + +data "google_client_openid_userinfo" "me" {} + +output "impersonating-account" { + value = data.google_client_openid_userinfo.me.email +} diff --git a/datasets/world_bank_intl_debt/_terraform/series_summary_pipeline.tf b/datasets/world_bank_intl_debt/_terraform/series_summary_pipeline.tf new file mode 100644 index 000000000..ed5a439cb --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/series_summary_pipeline.tf @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +resource "google_bigquery_table" "series_summary" { + project = var.project_id + dataset_id = "world_bank_intl_debt" + table_id = "series_summary" + + description = "Series Summary table" + + + + + depends_on = [ + google_bigquery_dataset.world_bank_intl_debt + ] +} + +output "bigquery_table-series_summary-table_id" { + value = google_bigquery_table.series_summary.table_id +} + +output "bigquery_table-series_summary-id" { + value = google_bigquery_table.series_summary.id +} diff --git a/datasets/world_bank_intl_debt/_terraform/series_times_pipeline.tf b/datasets/world_bank_intl_debt/_terraform/series_times_pipeline.tf new file mode 100644 index 000000000..14db30c48 --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/series_times_pipeline.tf @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +resource "google_bigquery_table" "series_times" { + project = var.project_id + dataset_id = "world_bank_intl_debt" + table_id = "series_times" + + description = "Series Times table" + + + + + depends_on = [ + google_bigquery_dataset.world_bank_intl_debt + ] +} + +output "bigquery_table-series_times-table_id" { + value = google_bigquery_table.series_times.table_id +} + +output "bigquery_table-series_times-id" { + value = google_bigquery_table.series_times.id +} diff --git a/datasets/world_bank_intl_debt/_terraform/variables.tf b/datasets/world_bank_intl_debt/_terraform/variables.tf new file mode 100644 index 000000000..c3ec7c506 --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/variables.tf @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +variable "project_id" {} +variable "bucket_name_prefix" {} +variable "impersonating_acct" {} +variable "region" {} +variable "env" {} + diff --git a/datasets/world_bank_intl_debt/_terraform/world_bank_intl_debt_dataset.tf b/datasets/world_bank_intl_debt/_terraform/world_bank_intl_debt_dataset.tf new file mode 100644 index 000000000..4eeba0a85 --- /dev/null +++ b/datasets/world_bank_intl_debt/_terraform/world_bank_intl_debt_dataset.tf @@ -0,0 +1,26 @@ +/** + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +resource "google_bigquery_dataset" "world_bank_intl_debt" { + dataset_id = "world_bank_intl_debt" + project = var.project_id + description = "World Bank Intl Debt" +} + +output "bigquery_dataset-world_bank_intl_debt-dataset_id" { + value = google_bigquery_dataset.world_bank_intl_debt.dataset_id +} diff --git a/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py b/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py new file mode 100644 index 000000000..afaf61b0a --- /dev/null +++ b/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py @@ -0,0 +1,92 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from airflow import DAG +from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator + +default_args = { + "owner": "Google", + "depends_on_past": False, + "start_date": "2021-03-01", +} + + +with DAG( + dag_id="world_bank_intl_debt.country_series_definitions", + default_args=default_args, + max_active_runs=1, + schedule_interval="@daily", + catchup=False, + default_view="graph", +) as dag: + + # Run CSV transform within kubernetes pod + country_series_definitions_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( + task_id="country_series_definitions_transform_csv", + startup_timeout_seconds=600, + name="country_series_definitions", + namespace="default", + affinity={ + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-nodepool", + "operator": "In", + "values": ["pool-e2-standard-4"], + } + ] + } + ] + } + } + }, + image_pull_policy="Always", + image="{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}", + env_vars={ + "SOURCE_URL": "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry-Series.csv", + "SOURCE_FILE": "files/data.csv", + "COLUMN_TO_REMOVE": "Unnamed: 3", + "TARGET_FILE": "files/data_output.csv", + "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_PATH": "data/world_bank_intl_debt/country_series_definitions/data_output.csv", + "PIPELINE_NAME": "country_series_definitions", + "CSV_HEADERS": '["country_code" ,"series_code" ,"description"]', + "RENAME_MAPPINGS": '{"CountryCode":"country_code","SeriesCode":"series_code","DESCRIPTION":"description"}', + }, + resources={"request_memory": "2G", "request_cpu": "1"}, + ) + + # Task to load CSV data to a BigQuery table + load_country_series_definitions_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( + task_id="load_country_series_definitions_to_bq", + bucket="{{ var.json.shared.composer_bucket }}", + 
source_objects=[ + "data/world_bank_intl_debt/country_series_definitions/data_output.csv" + ], + source_format="CSV", + destination_project_dataset_table="world_bank_intl_debt.country_series_definitions", + skip_leading_rows=1, + write_disposition="WRITE_TRUNCATE", + schema_fields=[ + {"name": "country_code", "type": "string", "mode": "nullable"}, + {"name": "series_code", "type": "string", "mode": "nullable"}, + {"name": "description", "type": "string", "mode": "nullable"}, + ], + ) + + country_series_definitions_transform_csv >> load_country_series_definitions_to_bq diff --git a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml new file mode 100644 index 000000000..e428c4287 --- /dev/null +++ b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml @@ -0,0 +1,131 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
 + +--- +resources: + + - type: bigquery_table + # Required Properties: + table_id: country_series_definitions + + # Description of the table + description: "Country Series Definition table" + +dag: + airflow_version: 1 + initialize: + dag_id: country_series_definitions + default_args: + owner: "Google" + + # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded + depends_on_past: False + start_date: "2021-03-01" + max_active_runs: 1 + schedule_interval: "@daily" + catchup: False + default_view: graph + + tasks: + - operator: "KubernetesPodOperator" + + # Task description + description: "Run CSV transform within kubernetes pod" + + args: + + task_id: "country_series_definitions_transform_csv" + + startup_timeout_seconds: 600 + + # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id + name: "country_series_definitions" + + # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment's resources without starving other pipelines. + namespace: "default" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cloud.google.com/gke-nodepool + operator: In + values: + - "pool-e2-standard-4" + + + image_pull_policy: "Always" + + # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. + image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" + + # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
+ env_vars: + SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry-Series.csv" + SOURCE_FILE: "files/data.csv" + COLUMN_TO_REMOVE: "Unnamed: 3" + TARGET_FILE: "files/data_output.csv" + TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_PATH: "data/world_bank_intl_debt/country_series_definitions/data_output.csv" + PIPELINE_NAME: "country_series_definitions" + CSV_HEADERS: >- + ["country_code" ,"series_code" ,"description"] + RENAME_MAPPINGS: >- + {"CountryCode":"country_code","SeriesCode":"series_code","DESCRIPTION":"description"} + # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: + request_memory: "2G" + request_cpu: "1" + + - operator: "GoogleCloudStorageToBigQueryOperator" + description: "Task to load CSV data to a BigQuery table" + + args: + task_id: "load_country_series_definitions_to_bq" + + # The GCS bucket where the CSV file is located in. + bucket: "{{ var.json.shared.composer_bucket }}" + + # The GCS object path for the CSV file + source_objects: ["data/world_bank_intl_debt/country_series_definitions/data_output.csv"] + source_format: "CSV" + destination_project_dataset_table: "world_bank_intl_debt.country_series_definitions" + + # Use this if your CSV file contains a header row + skip_leading_rows: 1 + + # How to write data to the table: overwrite, append, or write if empty + # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition + write_disposition: "WRITE_TRUNCATE" + + # The BigQuery table schema based on the CSV file. For more info, see + # https://cloud.google.com/bigquery/docs/schemas. + # Always use snake_case and lowercase for column names, and be explicit, + # i.e. specify modes for all columns. 
+ + schema_fields: + - name: "country_code" + type: "string" + mode: "nullable" + - name: "series_code" + type: "string" + mode: "nullable" + - name: "description" + type: "string" + mode: "nullable" + + + graph_paths: + - "country_series_definitions_transform_csv >> load_country_series_definitions_to_bq" \ No newline at end of file diff --git a/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py b/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py new file mode 100644 index 000000000..1d667f8ad --- /dev/null +++ b/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py @@ -0,0 +1,167 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from airflow import DAG +from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator + +default_args = { + "owner": "Google", + "depends_on_past": False, + "start_date": "2021-03-01", +} + + +with DAG( + dag_id="world_bank_intl_debt.country_summary", + default_args=default_args, + max_active_runs=1, + schedule_interval="@daily", + catchup=False, + default_view="graph", +) as dag: + + # Run CSV transform within kubernetes pod + country_summary_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( + task_id="country_summary_transform_csv", + startup_timeout_seconds=600, + name="country_summary", + namespace="default", + affinity={ + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-nodepool", + "operator": "In", + "values": ["pool-e2-standard-4"], + } + ] + } + ] + } + } + }, + image_pull_policy="Always", + image="{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}", + env_vars={ + "SOURCE_URL": "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry.csv", + "SOURCE_FILE": "files/data.csv", + "COLUMN_TO_REMOVE": "Unnamed: 31", + "TARGET_FILE": "files/data_output.csv", + "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_PATH": "data/world_bank_intl_debt/country_summary/data_output.csv", + "PIPELINE_NAME": "country_summary", + "CSV_HEADERS": 
'["country_code","short_name","table_name","long_name","two_alpha_code","currency_unit","special_notes","region","income_group","wb_2_code","national_accounts_base_year","national_accounts_reference_year","sna_price_valuation","lending_category","other_groups","system_of_national_accounts","alternative_conversion_factor","ppp_survey_year","balance_of_payments_manual_in_use","external_debt_reporting_status","system_of_trade","government_accounting_concept","imf_data_dissemination_standard","latest_population_census","latest_household_survey","source_of_most_recent_Income_and_expenditure_data","vital_registration_complete","latest_agricultural_census","latest_industrial_data","latest_trade_data","latest_water_withdrawal_data"]', + "RENAME_MAPPINGS": '{"Country Code":"country_code","Short Name":"short_name","Table Name":"table_name","Long Name":"long_name","2-alpha code":"two_alpha_code","Currency Unit":"currency_unit","Special Notes":"special_notes","Region":"region","Income Group":"income_group","WB-2 code":"wb_2_code","National accounts base year":"national_accounts_base_year","National accounts reference year":"national_accounts_reference_year","SNA price valuation":"sna_price_valuation","Lending category":"lending_category","Other groups":"other_groups","System of National Accounts":"system_of_national_accounts","Alternative conversion factor":"alternative_conversion_factor","PPP survey year":"ppp_survey_year","Balance of Payments Manual in use":"balance_of_payments_manual_in_use","External debt Reporting status":"external_debt_reporting_status","System of trade":"system_of_trade","Government Accounting concept":"government_accounting_concept","IMF data dissemination standard":"imf_data_dissemination_standard","Latest population census":"latest_population_census","Latest household survey":"latest_household_survey","Source of most recent Income and expenditure data":"source_of_most_recent_Income_and_expenditure_data","Vital registration 
complete":"vital_registration_complete","Latest agricultural census":"latest_agricultural_census","Latest industrial data":"latest_industrial_data","Latest trade data":"latest_trade_data"}', + }, + resources={"request_memory": "2G", "request_cpu": "1"}, + ) + + # Task to load CSV data to a BigQuery table + load_country_summary_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( + task_id="load_country_summary_to_bq", + bucket="{{ var.json.shared.composer_bucket }}", + source_objects=["data/world_bank_intl_debt/country_summary/data_output.csv"], + source_format="CSV", + destination_project_dataset_table="world_bank_intl_debt.country_summary", + skip_leading_rows=1, + allow_quoted_newlines=True, + write_disposition="WRITE_TRUNCATE", + schema_fields=[ + {"name": "country_code", "type": "string", "mode": "nullable"}, + {"name": "short_name", "type": "string", "mode": "nullable"}, + {"name": "table_name", "type": "string", "mode": "nullable"}, + {"name": "long_name", "type": "string", "mode": "nullable"}, + {"name": "two_alpha_code", "type": "string", "mode": "nullable"}, + {"name": "currency_unit", "type": "string", "mode": "nullable"}, + {"name": "special_notes", "type": "string", "mode": "nullable"}, + {"name": "region", "type": "string", "mode": "nullable"}, + {"name": "income_group", "type": "string", "mode": "nullable"}, + {"name": "wb_2_code", "type": "string", "mode": "nullable"}, + { + "name": "national_accounts_base_year", + "type": "string", + "mode": "nullable", + }, + { + "name": "national_accounts_reference_year", + "type": "string", + "mode": "nullable", + }, + {"name": "sna_price_valuation", "type": "string", "mode": "nullable"}, + {"name": "lending_category", "type": "string", "mode": "nullable"}, + {"name": "other_groups", "type": "string", "mode": "nullable"}, + { + "name": "system_of_national_accounts", + "type": "string", + "mode": "nullable", + }, + { + "name": "alternative_conversion_factor", + "type": "string", + "mode": "nullable", + }, + 
{"name": "ppp_survey_year", "type": "string", "mode": "nullable"}, + { + "name": "balance_of_payments_manual_in_use", + "type": "string", + "mode": "nullable", + }, + { + "name": "external_debt_reporting_status", + "type": "string", + "mode": "nullable", + }, + {"name": "system_of_trade", "type": "string", "mode": "nullable"}, + { + "name": "government_accounting_concept", + "type": "string", + "mode": "nullable", + }, + { + "name": "imf_data_dissemination_standard", + "type": "string", + "mode": "nullable", + }, + {"name": "latest_population_census", "type": "string", "mode": "nullable"}, + {"name": "latest_household_survey", "type": "string", "mode": "nullable"}, + { + "name": "source_of_most_recent_Income_and_expenditure_data", + "type": "string", + "mode": "nullable", + }, + { + "name": "vital_registration_complete", + "type": "string", + "mode": "nullable", + }, + { + "name": "latest_agricultural_census", + "type": "string", + "mode": "nullable", + }, + {"name": "latest_industrial_data", "type": "integer", "mode": "nullable"}, + {"name": "latest_trade_data", "type": "integer", "mode": "nullable"}, + { + "name": "latest_water_withdrawal_data", + "type": "integer", + "mode": "nullable", + }, + ], + ) + + country_summary_transform_csv >> load_country_summary_to_bq diff --git a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml new file mode 100644 index 000000000..9f9c917d6 --- /dev/null +++ b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml @@ -0,0 +1,216 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
 +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +resources: + + - type: bigquery_table + # Required Properties: + table_id: country_summary + + # Description of the table + description: "Country Summary table" + +dag: + airflow_version: 1 + initialize: + dag_id: country_summary + default_args: + owner: "Google" + + # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded + depends_on_past: False + start_date: "2021-03-01" + max_active_runs: 1 + schedule_interval: "@daily" + catchup: False + default_view: graph + + tasks: + - operator: "KubernetesPodOperator" + + # Task description + description: "Run CSV transform within kubernetes pod" + + args: + + task_id: "country_summary_transform_csv" + + startup_timeout_seconds: 600 + + # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id + name: "country_summary" + + # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment's resources without starving other pipelines. + namespace: "default" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cloud.google.com/gke-nodepool + operator: In + values: + - "pool-e2-standard-4" + + + image_pull_policy: "Always" + + # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. 
To skip building and pushing images, use the optional `--skip-builds` flag. + image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" + + # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. + env_vars: + SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry.csv" + SOURCE_FILE: "files/data.csv" + COLUMN_TO_REMOVE: "Unnamed: 31" + TARGET_FILE: "files/data_output.csv" + TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_PATH: "data/world_bank_intl_debt/country_summary/data_output.csv" + PIPELINE_NAME: "country_summary" + CSV_HEADERS: >- + ["country_code","short_name","table_name","long_name","two_alpha_code","currency_unit","special_notes","region","income_group","wb_2_code","national_accounts_base_year","national_accounts_reference_year","sna_price_valuation","lending_category","other_groups","system_of_national_accounts","alternative_conversion_factor","ppp_survey_year","balance_of_payments_manual_in_use","external_debt_reporting_status","system_of_trade","government_accounting_concept","imf_data_dissemination_standard","latest_population_census","latest_household_survey","source_of_most_recent_Income_and_expenditure_data","vital_registration_complete","latest_agricultural_census","latest_industrial_data","latest_trade_data","latest_water_withdrawal_data"] + RENAME_MAPPINGS: >- + {"Country Code":"country_code","Short Name":"short_name","Table Name":"table_name","Long Name":"long_name","2-alpha code":"two_alpha_code","Currency Unit":"currency_unit","Special Notes":"special_notes","Region":"region","Income Group":"income_group","WB-2 code":"wb_2_code","National accounts base year":"national_accounts_base_year","National accounts reference year":"national_accounts_reference_year","SNA price valuation":"sna_price_valuation","Lending category":"lending_category","Other groups":"other_groups","System of 
National Accounts":"system_of_national_accounts","Alternative conversion factor":"alternative_conversion_factor","PPP survey year":"ppp_survey_year","Balance of Payments Manual in use":"balance_of_payments_manual_in_use","External debt Reporting status":"external_debt_reporting_status","System of trade":"system_of_trade","Government Accounting concept":"government_accounting_concept","IMF data dissemination standard":"imf_data_dissemination_standard","Latest population census":"latest_population_census","Latest household survey":"latest_household_survey","Source of most recent Income and expenditure data":"source_of_most_recent_Income_and_expenditure_data","Vital registration complete":"vital_registration_complete","Latest agricultural census":"latest_agricultural_census","Latest industrial data":"latest_industrial_data","Latest trade data":"latest_trade_data"} + # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: + request_memory: "2G" + request_cpu: "1" + + - operator: "GoogleCloudStorageToBigQueryOperator" + description: "Task to load CSV data to a BigQuery table" + + args: + task_id: "load_country_summary_to_bq" + + # The GCS bucket where the CSV file is located in. + bucket: "{{ var.json.shared.composer_bucket }}" + + # The GCS object path for the CSV file + source_objects: ["data/world_bank_intl_debt/country_summary/data_output.csv"] + source_format: "CSV" + destination_project_dataset_table: "world_bank_intl_debt.country_summary" + + # Use this if your CSV file contains a header row + skip_leading_rows: 1 + allow_quoted_newlines: True + + # How to write data to the table: overwrite, append, or write if empty + # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition + write_disposition: "WRITE_TRUNCATE" + + # The BigQuery table schema based on the CSV file. 
For more info, see + # https://cloud.google.com/bigquery/docs/schemas. + # Always use snake_case and lowercase for column names, and be explicit, + # i.e. specify modes for all columns. + + schema_fields: + - name: "country_code" + type: "string" + mode: "nullable" + - name: "short_name" + type: "string" + mode: "nullable" + - name: "table_name" + type: "string" + mode: "nullable" + - name: "long_name" + type: "string" + mode: "nullable" + - name: "two_alpha_code" + type: "string" + mode: "nullable" + - name: "currency_unit" + type: "string" + mode: "nullable" + - name: "special_notes" + type: "string" + mode: "nullable" + - name: "region" + type: "string" + mode: "nullable" + - name: "income_group" + type: "string" + mode: "nullable" + - name: "wb_2_code" + type: "string" + mode: "nullable" + - name: "national_accounts_base_year" + type: "string" + mode: "nullable" + - name: "national_accounts_reference_year" + type: "string" + mode: "nullable" + - name: "sna_price_valuation" + type: "string" + mode: "nullable" + - name: "lending_category" + type: "string" + mode: "nullable" + - name: "other_groups" + type: "string" + mode: "nullable" + - name: "system_of_national_accounts" + type: "string" + mode: "nullable" + - name: "alternative_conversion_factor" + type: "string" + mode: "nullable" + - name: "ppp_survey_year" + type: "string" + mode: "nullable" + - name: "balance_of_payments_manual_in_use" + type: "string" + mode: "nullable" + - name: "external_debt_reporting_status" + type: "string" + mode: "nullable" + - name: "system_of_trade" + type: "string" + mode: "nullable" + - name: "government_accounting_concept" + type: "string" + mode: "nullable" + - name: "imf_data_dissemination_standard" + type: "string" + mode: "nullable" + - name: "latest_population_census" + type: "string" + mode: "nullable" + - name: "latest_household_survey" + type: "string" + mode: "nullable" + - name: "source_of_most_recent_Income_and_expenditure_data" + type: "string" + mode: "nullable" + 
- name: "vital_registration_complete" + type: "string" + mode: "nullable" + - name: "latest_agricultural_census" + type: "string" + mode: "nullable" + - name: "latest_industrial_data" + type: "integer" + mode: "nullable" + - name: "latest_trade_data" + type: "integer" + mode: "nullable" + - name: "latest_water_withdrawal_data" + type: "integer" + mode: "nullable" + + + graph_paths: + - "country_summary_transform_csv >> load_country_summary_to_bq" \ No newline at end of file diff --git a/datasets/world_bank_intl_debt/dataset.yaml b/datasets/world_bank_intl_debt/dataset.yaml new file mode 100644 index 000000000..3c8e72eac --- /dev/null +++ b/datasets/world_bank_intl_debt/dataset.yaml @@ -0,0 +1,43 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +dataset: + # The `dataset` block includes properties for your dataset that will be shown + # to users of your data on the Google Cloud website. + + # Must be exactly the same name as the folder name your dataset.yaml is in. + name: world_bank_intl_debt + + # A friendly, human-readable name of the dataset + friendly_name: world_bank_intl_debt + + # A short, descriptive summary of the dataset. + description: "World Bank Intl Debt" + + # A list of sources the dataset is derived from, using the YAML list syntax. + dataset_sources: ~ + + # A list of terms and conditions that users of the dataset should agree on, + # using the YAML list syntax. 
+ terms_of_use: ~ + + +resources: + # A list of Google Cloud resources needed by your dataset. In principle, all + # pipelines under a dataset should be able to share these resources. + + - type: bigquery_dataset + + dataset_id: world_bank_intl_debt + description: "World Bank Intl Debt" \ No newline at end of file diff --git a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml new file mode 100644 index 000000000..2e92d1789 --- /dev/null +++ b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml @@ -0,0 +1,183 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +resources: + + - type: bigquery_table + # Required Properties: + table_id: series_summary + + # Description of the table + description: "Series Summary table" + +dag: + airflow_version: 1 + initialize: + dag_id: series_summary + default_args: + owner: "Google" + + # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded + depends_on_past: False + start_date: "2021-03-01" + max_active_runs: 1 + schedule_interval: "@daily" + catchup: False + default_view: graph + + tasks: + - operator: "KubernetesPodOperator" + + # Task description + description: "Run CSV transform within kubernetes pod" + + args: + + task_id: "series_summary_transform_csv" + + startup_timeout_seconds: 600 + + # The name of the pod in which the task will run. 
 This will be used (plus a random suffix) to generate a pod id + name: "series_summary" + + # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment's resources without starving other pipelines. + namespace: "default" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cloud.google.com/gke-nodepool + operator: In + values: + - "pool-e2-standard-4" + + + image_pull_policy: "Always" + + # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. + image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" + + # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
+ env_vars: + SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries.csv" + SOURCE_FILE: "files/data.csv" + COLUMN_TO_REMOVE: "Unnamed: 20" + TARGET_FILE: "files/data_output.csv" + TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_PATH: "data/world_bank_intl_debt/series_summary/data_output.csv" + PIPELINE_NAME: "series_summary" + CSV_HEADERS: >- + ["series_code" ,"topic" ,"indicator_name" ,"short_definition" ,"long_definition" ,"unit_of_measure" ,"periodicity" ,"base_period" ,"other_notes" ,"aggregation_method" ,"limitations_and_exceptions" ,"notes_from_original_source" ,"general_comments" ,"source" ,"statistical_concept_and_methodology" ,"development_relevance" ,"related_source_links" ,"other_web_links" ,"related_indicators" ,"license_type"] + RENAME_MAPPINGS: >- + {"Series Code":"series_code" ,"Topic":"topic" ,"Indicator Name":"indicator_name" ,"Short definition":"short_definition" ,"Long definition":"long_definition" ,"Unit of measure":"unit_of_measure" ,"Periodicity":"periodicity" ,"Base Period":"base_period" ,"Other notes":"other_notes" ,"Aggregation method":"aggregation_method" ,"Limitations and exceptions":"limitations_and_exceptions" ,"Notes from original source":"notes_from_original_source" ,"General comments":"general_comments" ,"Source":"source" ,"Statistical concept and methodology":"statistical_concept_and_methodology" ,"Development relevance":"development_relevance" ,"Related source links":"related_source_links" ,"Other web links":"other_web_links" ,"Related indicators":"related_indicators" ,"License Type":"license_type"} + # Set resource limits for the pod here. 
For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: + request_memory: "2G" + request_cpu: "1" + + - operator: "GoogleCloudStorageToBigQueryOperator" + description: "Task to load CSV data to a BigQuery table" + + args: + task_id: "load_series_summary_to_bq" + + # The GCS bucket where the CSV file is located in. + bucket: "{{ var.json.shared.composer_bucket }}" + + # The GCS object path for the CSV file + source_objects: ["data/world_bank_intl_debt/series_summary/data_output.csv"] + source_format: "CSV" + destination_project_dataset_table: "world_bank_intl_debt.series_summary" + + # Use this if your CSV file contains a header row + skip_leading_rows: 1 + allow_quoted_newlines: True + + # How to write data to the table: overwrite, append, or write if empty + # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition + write_disposition: "WRITE_TRUNCATE" + + # The BigQuery table schema based on the CSV file. For more info, see + # https://cloud.google.com/bigquery/docs/schemas. + # Always use snake_case and lowercase for column names, and be explicit, + # i.e. specify modes for all columns. 
+ + schema_fields: + - name: "series_code" + type: "string" + mode: "nullable" + - name: "topic" + type: "string" + mode: "nullable" + - name: "indicator_name" + type: "string" + mode: "nullable" + - name: "short_definition" + type: "string" + mode: "nullable" + - name: "long_definition" + type: "string" + mode: "nullable" + - name: "unit_of_measure" + type: "string" + mode: "nullable" + - name: "periodicity" + type: "string" + mode: "nullable" + - name: "base_period" + type: "string" + mode: "nullable" + - name: "other_notes" + type: "string" + mode: "nullable" + - name: "aggregation_method" + type: "string" + mode: "nullable" + - name: "limitations_and_exceptions" + type: "string" + mode: "nullable" + - name: "notes_from_original_source" + type: "string" + mode: "nullable" + - name: "general_comments" + type: "string" + mode: "nullable" + - name: "source" + type: "string" + mode: "nullable" + - name: "statistical_concept_and_methodology" + type: "string" + mode: "nullable" + - name: "development_relevance" + type: "string" + mode: "nullable" + - name: "related_source_links" + type: "string" + mode: "nullable" + - name: "other_web_links" + type: "string" + mode: "nullable" + - name: "related_indicators" + type: "string" + mode: "nullable" + - name: "license_type" + type: "string" + mode: "nullable" + + + graph_paths: + - "series_summary_transform_csv >> load_series_summary_to_bq" diff --git a/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py b/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py new file mode 100644 index 000000000..8a15865d4 --- /dev/null +++ b/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py @@ -0,0 +1,120 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from airflow import DAG +from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator + +default_args = { + "owner": "Google", + "depends_on_past": False, + "start_date": "2021-03-01", +} + + +with DAG( + dag_id="world_bank_intl_debt.series_summary", + default_args=default_args, + max_active_runs=1, + schedule_interval="@daily", + catchup=False, + default_view="graph", +) as dag: + + # Run CSV transform within kubernetes pod + series_summary_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( + task_id="series_summary_transform_csv", + startup_timeout_seconds=600, + name="series_summary", + namespace="default", + affinity={ + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-nodepool", + "operator": "In", + "values": ["pool-e2-standard-4"], + } + ] + } + ] + } + } + }, + image_pull_policy="Always", + image="{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}", + env_vars={ + "SOURCE_URL": "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries.csv", + "SOURCE_FILE": "files/data.csv", + "COLUMN_TO_REMOVE": "Unnamed: 20", + "TARGET_FILE": "files/data_output.csv", + "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_PATH": "data/world_bank_intl_debt/series_summary/data_output.csv", + "PIPELINE_NAME": "series_summary", + "CSV_HEADERS": '["series_code" ,"topic" ,"indicator_name" ,"short_definition" ,"long_definition" ,"unit_of_measure" ,"periodicity" ,"base_period" 
,"other_notes" ,"aggregation_method" ,"limitations_and_exceptions" ,"notes_from_original_source" ,"general_comments" ,"source" ,"statistical_concept_and_methodology" ,"development_relevance" ,"related_source_links" ,"other_web_links" ,"related_indicators" ,"license_type"]', + "RENAME_MAPPINGS": '{"Series Code":"series_code" ,"Topic":"topic" ,"Indicator Name":"indicator_name" ,"Short definition":"short_definition" ,"Long definition":"long_definition" ,"Unit of measure":"unit_of_measure" ,"Periodicity":"periodicity" ,"Base Period":"base_period" ,"Other notes":"other_notes" ,"Aggregation method":"aggregation_method" ,"Limitations and exceptions":"limitations_and_exceptions" ,"Notes from original source":"notes_from_original_source" ,"General comments":"general_comments" ,"Source":"source" ,"Statistical concept and methodology":"statistical_concept_and_methodology" ,"Development relevance":"development_relevance" ,"Related source links":"related_source_links" ,"Other web links":"other_web_links" ,"Related indicators":"related_indicators" ,"License Type":"license_type"}', + }, + resources={"request_memory": "2G", "request_cpu": "1"}, + ) + + # Task to load CSV data to a BigQuery table + load_series_summary_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( + task_id="load_series_summary_to_bq", + bucket="{{ var.json.shared.composer_bucket }}", + source_objects=["data/world_bank_intl_debt/series_summary/data_output.csv"], + source_format="CSV", + destination_project_dataset_table="world_bank_intl_debt.series_summary", + skip_leading_rows=1, + allow_quoted_newlines=True, + write_disposition="WRITE_TRUNCATE", + schema_fields=[ + {"name": "series_code", "type": "string", "mode": "nullable"}, + {"name": "topic", "type": "string", "mode": "nullable"}, + {"name": "indicator_name", "type": "string", "mode": "nullable"}, + {"name": "short_definition", "type": "string", "mode": "nullable"}, + {"name": "long_definition", "type": "string", "mode": "nullable"}, + {"name": 
"unit_of_measure", "type": "string", "mode": "nullable"}, + {"name": "periodicity", "type": "string", "mode": "nullable"}, + {"name": "base_period", "type": "string", "mode": "nullable"}, + {"name": "other_notes", "type": "string", "mode": "nullable"}, + {"name": "aggregation_method", "type": "string", "mode": "nullable"}, + { + "name": "limitations_and_exceptions", + "type": "string", + "mode": "nullable", + }, + { + "name": "notes_from_original_source", + "type": "string", + "mode": "nullable", + }, + {"name": "general_comments", "type": "string", "mode": "nullable"}, + {"name": "source", "type": "string", "mode": "nullable"}, + { + "name": "statistical_concept_and_methodology", + "type": "string", + "mode": "nullable", + }, + {"name": "development_relevance", "type": "string", "mode": "nullable"}, + {"name": "related_source_links", "type": "string", "mode": "nullable"}, + {"name": "other_web_links", "type": "string", "mode": "nullable"}, + {"name": "related_indicators", "type": "string", "mode": "nullable"}, + {"name": "license_type", "type": "string", "mode": "nullable"}, + ], + ) + + series_summary_transform_csv >> load_series_summary_to_bq diff --git a/datasets/world_bank_intl_debt/series_times/pipeline.yaml b/datasets/world_bank_intl_debt/series_times/pipeline.yaml new file mode 100644 index 000000000..f4e884e08 --- /dev/null +++ b/datasets/world_bank_intl_debt/series_times/pipeline.yaml @@ -0,0 +1,131 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- +resources: + + - type: bigquery_table + # Required Properties: + table_id: series_times + + # Description of the table + description: "Series Times table" + +dag: + airflow_version: 1 + initialize: + dag_id: series_times + default_args: + owner: "Google" + + # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded + depends_on_past: False + start_date: "2021-03-01" + max_active_runs: 1 + schedule_interval: "@daily" + catchup: False + default_view: graph + + tasks: + - operator: "KubernetesPodOperator" + + # Task description + description: "Run CSV transform within kubernetes pod" + + args: + + task_id: "series_times_transform_csv" + + startup_timeout_seconds: 600 + + # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id + name: "series_times" + + # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. + namespace: "default" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cloud.google.com/gke-nodepool + operator: In + values: + - "pool-e2-standard-4" + + + image_pull_policy: "Always" + + # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. + image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" + + # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
+ env_vars: + SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries-Time.csv" + SOURCE_FILE: "files/data.csv" + COLUMN_TO_REMOVE: "Unnamed: 3" + TARGET_FILE: "files/data_output.csv" + TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_PATH: "data/world_bank_intl_debt/series_times/data_output.csv" + PIPELINE_NAME: "series_times" + CSV_HEADERS: >- + ["series_code","year","description"] + RENAME_MAPPINGS: >- + {"SeriesCode" : "series_code" ,"Year" : "year" ,"DESCRIPTION" : "description"} + # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: + request_memory: "2G" + request_cpu: "1" + + - operator: "GoogleCloudStorageToBigQueryOperator" + description: "Task to load CSV data to a BigQuery table" + + args: + task_id: "load_series_times_to_bq" + + # The GCS bucket where the CSV file is located in. + bucket: "{{ var.json.shared.composer_bucket }}" + + # The GCS object path for the CSV file + source_objects: ["data/world_bank_intl_debt/series_times/data_output.csv"] + source_format: "CSV" + destination_project_dataset_table: "world_bank_intl_debt.series_times" + + # Use this if your CSV file contains a header row + skip_leading_rows: 1 + + # How to write data to the table: overwrite, append, or write if empty + # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition + write_disposition: "WRITE_TRUNCATE" + + # The BigQuery table schema based on the CSV file. For more info, see + # https://cloud.google.com/bigquery/docs/schemas. + # Always use snake_case and lowercase for column names, and be explicit, + # i.e. specify modes for all columns. 
+ + schema_fields: + - name: "series_code" + type: "string" + mode: "nullable" + - name: "year" + type: "integer" + mode: "nullable" + - name: "description" + type: "string" + mode: "nullable" + + + graph_paths: + - "series_times_transform_csv >> load_series_times_to_bq" \ No newline at end of file diff --git a/datasets/world_bank_intl_debt/series_times/series_times_dag.py b/datasets/world_bank_intl_debt/series_times/series_times_dag.py new file mode 100644 index 000000000..b0f4c4c11 --- /dev/null +++ b/datasets/world_bank_intl_debt/series_times/series_times_dag.py @@ -0,0 +1,90 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from airflow import DAG +from airflow.contrib.operators import gcs_to_bq, kubernetes_pod_operator + +default_args = { + "owner": "Google", + "depends_on_past": False, + "start_date": "2021-03-01", +} + + +with DAG( + dag_id="world_bank_intl_debt.series_times", + default_args=default_args, + max_active_runs=1, + schedule_interval="@daily", + catchup=False, + default_view="graph", +) as dag: + + # Run CSV transform within kubernetes pod + series_times_transform_csv = kubernetes_pod_operator.KubernetesPodOperator( + task_id="series_times_transform_csv", + startup_timeout_seconds=600, + name="series_times", + namespace="default", + affinity={ + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-nodepool", + "operator": "In", + "values": ["pool-e2-standard-4"], + } + ] + } + ] + } + } + }, + image_pull_policy="Always", + image="{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}", + env_vars={ + "SOURCE_URL": "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries-Time.csv", + "SOURCE_FILE": "files/data.csv", + "COLUMN_TO_REMOVE": "Unnamed: 3", + "TARGET_FILE": "files/data_output.csv", + "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_PATH": "data/world_bank_intl_debt/series_times/data_output.csv", + "PIPELINE_NAME": "series_times", + "CSV_HEADERS": '["series_code","year","description"]', + "RENAME_MAPPINGS": '{"SeriesCode" : "series_code" ,"Year" : "year" ,"DESCRIPTION" : "description"}', + }, + resources={"request_memory": "2G", "request_cpu": "1"}, + ) + + # Task to load CSV data to a BigQuery table + load_series_times_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( + task_id="load_series_times_to_bq", + bucket="{{ var.json.shared.composer_bucket }}", + source_objects=["data/world_bank_intl_debt/series_times/data_output.csv"], + source_format="CSV", + 
destination_project_dataset_table="world_bank_intl_debt.series_times", + skip_leading_rows=1, + write_disposition="WRITE_TRUNCATE", + schema_fields=[ + {"name": "series_code", "type": "string", "mode": "nullable"}, + {"name": "year", "type": "integer", "mode": "nullable"}, + {"name": "description", "type": "string", "mode": "nullable"}, + ], + ) + + series_times_transform_csv >> load_series_times_to_bq From 21438c5d802287f8345887012d2bb433266608f1 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 09:47:15 +0000 Subject: [PATCH 5/9] feat: Onboard World Bank Intl Debt dataset --- .../country_series_definitions/pipeline.yaml | 2 +- datasets/world_bank_intl_debt/country_summary/pipeline.yaml | 2 +- datasets/world_bank_intl_debt/series_summary/pipeline.yaml | 1 + datasets/world_bank_intl_debt/series_times/pipeline.yaml | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml index e428c4287..19398f450 100644 --- a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml @@ -128,4 +128,4 @@ dag: graph_paths: - - "country_series_definitions_transform_csv >> load_country_series_definitions_to_bq" \ No newline at end of file + - "country_series_definitions_transform_csv >> load_country_series_definitions_to_bq" diff --git a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml index 9f9c917d6..3a1750004 100644 --- a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml @@ -213,4 +213,4 @@ dag: graph_paths: - - "country_summary_transform_csv >> load_country_summary_to_bq" \ No newline at end of file + - "country_summary_transform_csv >> load_country_summary_to_bq" diff 
--git a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml index 2e92d1789..206988f38 100644 --- a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml @@ -181,3 +181,4 @@ dag: graph_paths: - "series_summary_transform_csv >> load_series_summary_to_bq" + diff --git a/datasets/world_bank_intl_debt/series_times/pipeline.yaml b/datasets/world_bank_intl_debt/series_times/pipeline.yaml index f4e884e08..4df445543 100644 --- a/datasets/world_bank_intl_debt/series_times/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_times/pipeline.yaml @@ -128,4 +128,4 @@ dag: graph_paths: - - "series_times_transform_csv >> load_series_times_to_bq" \ No newline at end of file + - "series_times_transform_csv >> load_series_times_to_bq" From b8bcd180cbc486f7c9e9ed3dd2018f74e238801e Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 09:50:23 +0000 Subject: [PATCH 6/9] feat: Onboard World Bank Intl Debt dataset --- datasets/world_bank_intl_debt/dataset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datasets/world_bank_intl_debt/dataset.yaml b/datasets/world_bank_intl_debt/dataset.yaml index 3c8e72eac..1b430e7f9 100644 --- a/datasets/world_bank_intl_debt/dataset.yaml +++ b/datasets/world_bank_intl_debt/dataset.yaml @@ -40,4 +40,4 @@ resources: - type: bigquery_dataset dataset_id: world_bank_intl_debt - description: "World Bank Intl Debt" \ No newline at end of file + description: "World Bank Intl Debt" From 17407eb46e60d7a0f0a56f39dc7a34beacc0e061 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Mon, 13 Sep 2021 10:47:42 +0000 Subject: [PATCH 7/9] feat: Onboard World Bank Intl Debt dataset --- datasets/world_bank_intl_debt/series_summary/pipeline.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml 
b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml index 206988f38..2e92d1789 100644 --- a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml @@ -181,4 +181,3 @@ dag: graph_paths: - "series_summary_transform_csv >> load_series_summary_to_bq" - From c68f4bc1c05053c888953fa16bd065fcad9948bf Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Wed, 22 Sep 2021 07:07:05 +0000 Subject: [PATCH 8/9] style: removed commented lines --- .../run_csv_transform_kub/csv_transform.py | 4 +-- .../country_series_definitions_dag.py | 4 +-- .../country_series_definitions/pipeline.yaml | 23 ++--------------- .../country_summary/country_summary_dag.py | 4 +-- .../country_summary/pipeline.yaml | 23 ++--------------- .../series_summary/pipeline.yaml | 25 +++---------------- .../series_summary/series_summary_dag.py | 4 +-- .../series_times/pipeline.yaml | 24 ++---------------- .../series_times/series_times_dag.py | 4 +-- 9 files changed, 19 insertions(+), 96 deletions(-) diff --git a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py index 63b4a51c5..e59ed3402 100644 --- a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py @@ -43,7 +43,7 @@ def main( + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) ) - logging.info("creating 'files' folder") + logging.info("Creating 'files' folder") pathlib.Path("./files").mkdir(parents=True, exist_ok=True) logging.info(f"Downloading file {source_url}") @@ -54,8 +54,6 @@ def main( logging.info(f"Transforming {source_file} ... 
") - # print(df.columns) - logging.info(f"Transform: Dropping column {column_name} ...") delete_column(df, column_name) diff --git a/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py b/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py index afaf61b0a..07e14589d 100644 --- a/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py +++ b/datasets/world_bank_intl_debt/country_series_definitions/country_series_definitions_dag.py @@ -62,7 +62,7 @@ "SOURCE_FILE": "files/data.csv", "COLUMN_TO_REMOVE": "Unnamed: 3", "TARGET_FILE": "files/data_output.csv", - "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}", "TARGET_GCS_PATH": "data/world_bank_intl_debt/country_series_definitions/data_output.csv", "PIPELINE_NAME": "country_series_definitions", "CSV_HEADERS": '["country_code" ,"series_code" ,"description"]', @@ -74,7 +74,7 @@ # Task to load CSV data to a BigQuery table load_country_series_definitions_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( task_id="load_country_series_definitions_to_bq", - bucket="{{ var.json.shared.composer_bucket }}", + bucket="{{ var.value.composer_bucket }}", source_objects=[ "data/world_bank_intl_debt/country_series_definitions/data_output.csv" ], diff --git a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml index 19398f450..74c147764 100644 --- a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml @@ -16,10 +16,8 @@ resources: - type: bigquery_table - # Required Properties: table_id: country_series_definitions - # Description of the table description: "Country Series Definition table" dag: @@ -29,7 +27,6 @@ dag: default_args: owner: "Google" - # When set to True, 
keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -40,7 +37,6 @@ dag: tasks: - operator: "KubernetesPodOperator" - # Task description description: "Run CSV transform within kubernetes pod" args: @@ -49,10 +45,8 @@ dag: startup_timeout_seconds: 600 - # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id name: "country_series_definitions" - # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. namespace: "default" affinity: @@ -68,23 +62,20 @@ dag: image_pull_policy: "Always" - # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry-Series.csv" SOURCE_FILE: "files/data.csv" COLUMN_TO_REMOVE: "Unnamed: 3" TARGET_FILE: "files/data_output.csv" - TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_BUCKET: "{{ var.value.composer_bucket }}" TARGET_GCS_PATH: "data/world_bank_intl_debt/country_series_definitions/data_output.csv" PIPELINE_NAME: "country_series_definitions" CSV_HEADERS: >- ["country_code" ,"series_code" ,"description"] RENAME_MAPPINGS: >- {"CountryCode":"country_code","SeriesCode":"series_code","DESCRIPTION":"description"} - # Set resource limits for the pod here. 
For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes resources: request_memory: "2G" request_cpu: "1" @@ -95,26 +86,16 @@ dag: args: task_id: "load_country_series_definitions_to_bq" - # The GCS bucket where the CSV file is located in. - bucket: "{{ var.json.shared.composer_bucket }}" + bucket: "{{ var.value.composer_bucket }}" - # The GCS object path for the CSV file source_objects: ["data/world_bank_intl_debt/country_series_definitions/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.country_series_definitions" - # Use this if your CSV file contains a header row skip_leading_rows: 1 - # How to write data to the table: overwrite, append, or write if empty - # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition write_disposition: "WRITE_TRUNCATE" - # The BigQuery table schema based on the CSV file. For more info, see - # https://cloud.google.com/bigquery/docs/schemas. - # Always use snake_case and lowercase for column names, and be explicit, - # i.e. specify modes for all columns. 
- schema_fields: - name: "country_code" type: "string" diff --git a/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py b/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py index 1d667f8ad..e6f1ed4e8 100644 --- a/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py +++ b/datasets/world_bank_intl_debt/country_summary/country_summary_dag.py @@ -62,7 +62,7 @@ "SOURCE_FILE": "files/data.csv", "COLUMN_TO_REMOVE": "Unnamed: 31", "TARGET_FILE": "files/data_output.csv", - "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}", "TARGET_GCS_PATH": "data/world_bank_intl_debt/country_summary/data_output.csv", "PIPELINE_NAME": "country_summary", "CSV_HEADERS": '["country_code","short_name","table_name","long_name","two_alpha_code","currency_unit","special_notes","region","income_group","wb_2_code","national_accounts_base_year","national_accounts_reference_year","sna_price_valuation","lending_category","other_groups","system_of_national_accounts","alternative_conversion_factor","ppp_survey_year","balance_of_payments_manual_in_use","external_debt_reporting_status","system_of_trade","government_accounting_concept","imf_data_dissemination_standard","latest_population_census","latest_household_survey","source_of_most_recent_Income_and_expenditure_data","vital_registration_complete","latest_agricultural_census","latest_industrial_data","latest_trade_data","latest_water_withdrawal_data"]', @@ -74,7 +74,7 @@ # Task to load CSV data to a BigQuery table load_country_summary_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( task_id="load_country_summary_to_bq", - bucket="{{ var.json.shared.composer_bucket }}", + bucket="{{ var.value.composer_bucket }}", source_objects=["data/world_bank_intl_debt/country_summary/data_output.csv"], source_format="CSV", destination_project_dataset_table="world_bank_intl_debt.country_summary", diff --git 
a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml index 3a1750004..68177ac52 100644 --- a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml @@ -16,10 +16,8 @@ resources: - type: bigquery_table - # Required Properties: table_id: country_summary - # Description of the table description: "Country Summary table" dag: @@ -29,7 +27,6 @@ dag: default_args: owner: "Google" - # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -40,7 +37,6 @@ dag: tasks: - operator: "KubernetesPodOperator" - # Task description description: "Run CSV transform within kubernetes pod" args: @@ -49,10 +45,8 @@ dag: startup_timeout_seconds: 600 - # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id name: "country_summary" - # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. namespace: "default" affinity: @@ -68,23 +62,20 @@ dag: image_pull_policy: "Always" - # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry.csv" SOURCE_FILE: "files/data.csv" COLUMN_TO_REMOVE: "Unnamed: 31" TARGET_FILE: "files/data_output.csv" - TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_BUCKET: "{{ var.value.composer_bucket }}" TARGET_GCS_PATH: "data/world_bank_intl_debt/country_summary/data_output.csv" PIPELINE_NAME: "country_summary" CSV_HEADERS: >- ["country_code","short_name","table_name","long_name","two_alpha_code","currency_unit","special_notes","region","income_group","wb_2_code","national_accounts_base_year","national_accounts_reference_year","sna_price_valuation","lending_category","other_groups","system_of_national_accounts","alternative_conversion_factor","ppp_survey_year","balance_of_payments_manual_in_use","external_debt_reporting_status","system_of_trade","government_accounting_concept","imf_data_dissemination_standard","latest_population_census","latest_household_survey","source_of_most_recent_Income_and_expenditure_data","vital_registration_complete","latest_agricultural_census","latest_industrial_data","latest_trade_data","latest_water_withdrawal_data"] RENAME_MAPPINGS: >- {"Country Code":"country_code","Short Name":"short_name","Table Name":"table_name","Long Name":"long_name","2-alpha code":"two_alpha_code","Currency Unit":"currency_unit","Special Notes":"special_notes","Region":"region","Income Group":"income_group","WB-2 code":"wb_2_code","National accounts base year":"national_accounts_base_year","National accounts reference year":"national_accounts_reference_year","SNA price valuation":"sna_price_valuation","Lending category":"lending_category","Other groups":"other_groups","System of National Accounts":"system_of_national_accounts","Alternative conversion factor":"alternative_conversion_factor","PPP survey year":"ppp_survey_year","Balance of Payments Manual in use":"balance_of_payments_manual_in_use","External debt Reporting 
status":"external_debt_reporting_status","System of trade":"system_of_trade","Government Accounting concept":"government_accounting_concept","IMF data dissemination standard":"imf_data_dissemination_standard","Latest population census":"latest_population_census","Latest household survey":"latest_household_survey","Source of most recent Income and expenditure data":"source_of_most_recent_Income_and_expenditure_data","Vital registration complete":"vital_registration_complete","Latest agricultural census":"latest_agricultural_census","Latest industrial data":"latest_industrial_data","Latest trade data":"latest_trade_data"} - # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes resources: request_memory: "2G" request_cpu: "1" @@ -95,27 +86,17 @@ dag: args: task_id: "load_country_summary_to_bq" - # The GCS bucket where the CSV file is located in. - bucket: "{{ var.json.shared.composer_bucket }}" + bucket: "{{ var.value.composer_bucket }}" - # The GCS object path for the CSV file source_objects: ["data/world_bank_intl_debt/country_summary/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.country_summary" - # Use this if your CSV file contains a header row skip_leading_rows: 1 allow_quoted_newlines: True - # How to write data to the table: overwrite, append, or write if empty - # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition write_disposition: "WRITE_TRUNCATE" - # The BigQuery table schema based on the CSV file. For more info, see - # https://cloud.google.com/bigquery/docs/schemas. - # Always use snake_case and lowercase for column names, and be explicit, - # i.e. specify modes for all columns. 
- schema_fields: - name: "country_code" type: "string" diff --git a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml index 2e92d1789..573d546d0 100644 --- a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml @@ -16,10 +16,8 @@ resources: - type: bigquery_table - # Required Properties: table_id: series_summary - # Description of the table description: "Series Summary table" dag: @@ -29,7 +27,6 @@ dag: default_args: owner: "Google" - # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -40,7 +37,7 @@ dag: tasks: - operator: "KubernetesPodOperator" - # Task description + description: "Run CSV transform within kubernetes pod" args: @@ -49,10 +46,8 @@ dag: startup_timeout_seconds: 600 - # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id name: "series_summary" - # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. namespace: "default" affinity: @@ -68,23 +63,21 @@ dag: image_pull_policy: "Always" - # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. To skip building and pushing images, use the optional `--skip-builds` flag. image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. 
env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries.csv" SOURCE_FILE: "files/data.csv" COLUMN_TO_REMOVE: "Unnamed: 20" TARGET_FILE: "files/data_output.csv" - TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_BUCKET: "{{ var.value.composer_bucket }}" TARGET_GCS_PATH: "data/world_bank_intl_debt/series_summary/data_output.csv" PIPELINE_NAME: "series_summary" CSV_HEADERS: >- ["series_code" ,"topic" ,"indicator_name" ,"short_definition" ,"long_definition" ,"unit_of_measure" ,"periodicity" ,"base_period" ,"other_notes" ,"aggregation_method" ,"limitations_and_exceptions" ,"notes_from_original_source" ,"general_comments" ,"source" ,"statistical_concept_and_methodology" ,"development_relevance" ,"related_source_links" ,"other_web_links" ,"related_indicators" ,"license_type"] RENAME_MAPPINGS: >- {"Series Code":"series_code" ,"Topic":"topic" ,"Indicator Name":"indicator_name" ,"Short definition":"short_definition" ,"Long definition":"long_definition" ,"Unit of measure":"unit_of_measure" ,"Periodicity":"periodicity" ,"Base Period":"base_period" ,"Other notes":"other_notes" ,"Aggregation method":"aggregation_method" ,"Limitations and exceptions":"limitations_and_exceptions" ,"Notes from original source":"notes_from_original_source" ,"General comments":"general_comments" ,"Source":"source" ,"Statistical concept and methodology":"statistical_concept_and_methodology" ,"Development relevance":"development_relevance" ,"Related source links":"related_source_links" ,"Other web links":"other_web_links" ,"Related indicators":"related_indicators" ,"License Type":"license_type"} - # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes + resources: request_memory: "2G" request_cpu: "1" @@ -95,27 +88,17 @@ dag: args: task_id: "load_series_summary_to_bq" - # The GCS bucket where the CSV file is located in. 
- bucket: "{{ var.json.shared.composer_bucket }}" + bucket: "{{ var.value.composer_bucket }}" - # The GCS object path for the CSV file source_objects: ["data/world_bank_intl_debt/series_summary/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.series_summary" - # Use this if your CSV file contains a header row skip_leading_rows: 1 allow_quoted_newlines: True - # How to write data to the table: overwrite, append, or write if empty - # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition write_disposition: "WRITE_TRUNCATE" - # The BigQuery table schema based on the CSV file. For more info, see - # https://cloud.google.com/bigquery/docs/schemas. - # Always use snake_case and lowercase for column names, and be explicit, - # i.e. specify modes for all columns. - schema_fields: - name: "series_code" type: "string" diff --git a/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py b/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py index 8a15865d4..d884bd631 100644 --- a/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py +++ b/datasets/world_bank_intl_debt/series_summary/series_summary_dag.py @@ -62,7 +62,7 @@ "SOURCE_FILE": "files/data.csv", "COLUMN_TO_REMOVE": "Unnamed: 20", "TARGET_FILE": "files/data_output.csv", - "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}", "TARGET_GCS_PATH": "data/world_bank_intl_debt/series_summary/data_output.csv", "PIPELINE_NAME": "series_summary", "CSV_HEADERS": '["series_code" ,"topic" ,"indicator_name" ,"short_definition" ,"long_definition" ,"unit_of_measure" ,"periodicity" ,"base_period" ,"other_notes" ,"aggregation_method" ,"limitations_and_exceptions" ,"notes_from_original_source" ,"general_comments" ,"source" ,"statistical_concept_and_methodology" ,"development_relevance" ,"related_source_links" ,"other_web_links" 
,"related_indicators" ,"license_type"]', @@ -74,7 +74,7 @@ # Task to load CSV data to a BigQuery table load_series_summary_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( task_id="load_series_summary_to_bq", - bucket="{{ var.json.shared.composer_bucket }}", + bucket="{{ var.value.composer_bucket }}", source_objects=["data/world_bank_intl_debt/series_summary/data_output.csv"], source_format="CSV", destination_project_dataset_table="world_bank_intl_debt.series_summary", diff --git a/datasets/world_bank_intl_debt/series_times/pipeline.yaml b/datasets/world_bank_intl_debt/series_times/pipeline.yaml index 4df445543..99b800911 100644 --- a/datasets/world_bank_intl_debt/series_times/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_times/pipeline.yaml @@ -16,10 +16,8 @@ resources: - type: bigquery_table - # Required Properties: table_id: series_times - # Description of the table description: "Series Times table" dag: @@ -29,7 +27,6 @@ dag: default_args: owner: "Google" - # When set to True, keeps a task from getting triggered if the previous schedule for the task hasn’t succeeded depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -40,7 +37,6 @@ dag: tasks: - operator: "KubernetesPodOperator" - # Task description description: "Run CSV transform within kubernetes pod" args: @@ -49,10 +45,8 @@ dag: startup_timeout_seconds: 600 - # The name of the pod in which the task will run. This will be used (plus a random suffix) to generate a pod id name: "series_times" - # The namespace to run within Kubernetes. Always set its value to "default" because we follow the guideline that KubernetesPodOperator will only be used for very light workloads, i.e. use the Cloud Composer environment"s resources without starving other pipelines. namespace: "default" affinity: @@ -68,23 +62,20 @@ dag: image_pull_policy: "Always" - # Docker images will be built and pushed to GCR by default whenever the `scripts/generate_dag.py` is run. 
To skip building and pushing images, use the optional `--skip-builds` flag. image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - # Set the environment variables you need initialized in the container. Use these as input variables for the script your container is expected to perform. env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries-Time.csv" SOURCE_FILE: "files/data.csv" COLUMN_TO_REMOVE: "Unnamed: 3" TARGET_FILE: "files/data_output.csv" - TARGET_GCS_BUCKET: "{{ var.json.shared.composer_bucket }}" + TARGET_GCS_BUCKET: "{{ var.value.composer_bucket }}" TARGET_GCS_PATH: "data/world_bank_intl_debt/series_times/data_output.csv" PIPELINE_NAME: "series_times" CSV_HEADERS: >- ["series_code","year","description"] RENAME_MAPPINGS: >- {"SeriesCode" : "series_code" ,"Year" : "year" ,"DESCRIPTION" : "description"} - # Set resource limits for the pod here. For resource units in Kubernetes, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes resources: request_memory: "2G" request_cpu: "1" @@ -95,26 +86,15 @@ dag: args: task_id: "load_series_times_to_bq" - # The GCS bucket where the CSV file is located in. - bucket: "{{ var.json.shared.composer_bucket }}" + bucket: "{{ var.value.composer_bucket }}" - # The GCS object path for the CSV file source_objects: ["data/world_bank_intl_debt/series_times/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.series_times" - # Use this if your CSV file contains a header row skip_leading_rows: 1 - - # How to write data to the table: overwrite, append, or write if empty - # See https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/WriteDisposition write_disposition: "WRITE_TRUNCATE" - # The BigQuery table schema based on the CSV file. For more info, see - # https://cloud.google.com/bigquery/docs/schemas. 
- # Always use snake_case and lowercase for column names, and be explicit, - # i.e. specify modes for all columns. - schema_fields: - name: "series_code" type: "string" diff --git a/datasets/world_bank_intl_debt/series_times/series_times_dag.py b/datasets/world_bank_intl_debt/series_times/series_times_dag.py index b0f4c4c11..a8c8c13ba 100644 --- a/datasets/world_bank_intl_debt/series_times/series_times_dag.py +++ b/datasets/world_bank_intl_debt/series_times/series_times_dag.py @@ -62,7 +62,7 @@ "SOURCE_FILE": "files/data.csv", "COLUMN_TO_REMOVE": "Unnamed: 3", "TARGET_FILE": "files/data_output.csv", - "TARGET_GCS_BUCKET": "{{ var.json.shared.composer_bucket }}", + "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}", "TARGET_GCS_PATH": "data/world_bank_intl_debt/series_times/data_output.csv", "PIPELINE_NAME": "series_times", "CSV_HEADERS": '["series_code","year","description"]', @@ -74,7 +74,7 @@ # Task to load CSV data to a BigQuery table load_series_times_to_bq = gcs_to_bq.GoogleCloudStorageToBigQueryOperator( task_id="load_series_times_to_bq", - bucket="{{ var.json.shared.composer_bucket }}", + bucket="{{ var.value.composer_bucket }}", source_objects=["data/world_bank_intl_debt/series_times/data_output.csv"], source_format="CSV", destination_project_dataset_table="world_bank_intl_debt.series_times", From 2ae2ee234f8d5f009c2191519fbb147529cade52 Mon Sep 17 00:00:00 2001 From: Dipannita Banerjee Date: Wed, 6 Oct 2021 14:12:15 +0000 Subject: [PATCH 9/9] fix: worked on review comments --- .../_images/run_csv_transform_kub/Dockerfile | 13 ------------ .../run_csv_transform_kub/csv_transform.py | 4 ++-- .../run_csv_transform_kub/requirements.txt | 1 - .../country_series_definitions/pipeline.yaml | 18 ----------------- .../country_summary/pipeline.yaml | 18 ----------------- datasets/world_bank_intl_debt/dataset.yaml | 17 ---------------- .../series_summary/pipeline.yaml | 20 ------------------- .../series_times/pipeline.yaml | 19 ------------------ 8 files changed, 
2 insertions(+), 108 deletions(-) diff --git a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile index 154e953cb..887b5ddbc 100644 --- a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/Dockerfile @@ -13,8 +13,6 @@ # limitations under the License. # The base image for this build -# FROM gcr.io/google.com/cloudsdktool/cloud-sdk:352.0.0-slim -# FROM python:3.8 FROM python:3.8 # Allow statements and log messages to appear in Cloud logs @@ -22,20 +20,9 @@ ENV PYTHONUNBUFFERED True RUN apt-get -y update && apt-get install -y apt-transport-https ca-certificates gnupg &&\ echo "deb https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list &&\ - # echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - &&\ apt-get -y update && apt-get install -y google-cloud-sdk - # echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ - # curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ - # apt-get update && apt-get install -y google-cloud-sdk=${CLOUD_SDK_VERSION}-0 $INSTALL_COMPONENTS && \ - - - -# echo "deb https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ -# apt-get update && apt-get install apt-transport-https ca-certificates gnupg && \ -# curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ -# apt-get update && sudo apt-get install google-cloud-sdk # Copy the requirements file into the image COPY requirements.txt ./ diff --git 
a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py index e59ed3402..467bcb1ad 100644 --- a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/csv_transform.py @@ -116,8 +116,8 @@ def delete_column(df: pd.DataFrame, column_name: str) -> None: def extract_year(string_val: str) -> str: - string_val = string_val[2:] - return string_val + # emaple : YR2018 + return string_val[2:] def save_to_new_file(df: pd.DataFrame, file_path: str) -> None: diff --git a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt index f36704793..a13f29317 100644 --- a/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt +++ b/datasets/world_bank_intl_debt/_images/run_csv_transform_kub/requirements.txt @@ -1,3 +1,2 @@ -requests pandas google-cloud-storage diff --git a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml index 74c147764..2edd7f94b 100644 --- a/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_series_definitions/pipeline.yaml @@ -17,7 +17,6 @@ resources: - type: bigquery_table table_id: country_series_definitions - description: "Country Series Definition table" dag: @@ -26,7 +25,6 @@ dag: dag_id: country_series_definitions default_args: owner: "Google" - depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -36,19 +34,12 @@ dag: tasks: - operator: "KubernetesPodOperator" - description: "Run CSV transform within kubernetes pod" - args: - task_id: "country_series_definitions_transform_csv" - startup_timeout_seconds: 600 - name: "country_series_definitions" - namespace: "default" - affinity: 
nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -58,12 +49,8 @@ dag: operator: In values: - "pool-e2-standard-4" - - image_pull_policy: "Always" - image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry-Series.csv" SOURCE_FILE: "files/data.csv" @@ -85,15 +72,11 @@ dag: args: task_id: "load_country_series_definitions_to_bq" - bucket: "{{ var.value.composer_bucket }}" - source_objects: ["data/world_bank_intl_debt/country_series_definitions/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.country_series_definitions" - skip_leading_rows: 1 - write_disposition: "WRITE_TRUNCATE" schema_fields: @@ -107,6 +90,5 @@ dag: type: "string" mode: "nullable" - graph_paths: - "country_series_definitions_transform_csv >> load_country_series_definitions_to_bq" diff --git a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml index 68177ac52..8da1233eb 100644 --- a/datasets/world_bank_intl_debt/country_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/country_summary/pipeline.yaml @@ -17,7 +17,6 @@ resources: - type: bigquery_table table_id: country_summary - description: "Country Summary table" dag: @@ -26,7 +25,6 @@ dag: dag_id: country_summary default_args: owner: "Google" - depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -36,19 +34,12 @@ dag: tasks: - operator: "KubernetesPodOperator" - description: "Run CSV transform within kubernetes pod" - args: - task_id: "country_summary_transform_csv" - startup_timeout_seconds: 600 - name: "country_summary" - namespace: "default" - affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -58,12 +49,8 @@ dag: operator: In values: - "pool-e2-standard-4" - - image_pull_policy: "Always" - image: "{{ 
var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSCountry.csv" SOURCE_FILE: "files/data.csv" @@ -82,19 +69,14 @@ dag: - operator: "GoogleCloudStorageToBigQueryOperator" description: "Task to load CSV data to a BigQuery table" - args: task_id: "load_country_summary_to_bq" - bucket: "{{ var.value.composer_bucket }}" - source_objects: ["data/world_bank_intl_debt/country_summary/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.country_summary" - skip_leading_rows: 1 allow_quoted_newlines: True - write_disposition: "WRITE_TRUNCATE" schema_fields: diff --git a/datasets/world_bank_intl_debt/dataset.yaml b/datasets/world_bank_intl_debt/dataset.yaml index 1b430e7f9..38585625a 100644 --- a/datasets/world_bank_intl_debt/dataset.yaml +++ b/datasets/world_bank_intl_debt/dataset.yaml @@ -13,31 +13,14 @@ # limitations under the License. dataset: - # The `dataset` block includes properties for your dataset that will be shown - # to users of your data on the Google Cloud website. - - # Must be exactly the same name as the folder name your dataset.yaml is in. name: world_bank_intl_debt - - # A friendly, human-readable name of the dataset friendly_name: world_bank_intl_debt - - # A short, descriptive summary of the dataset. description: "World Bank Intl Debt" - - # A list of sources the dataset is derived from, using the YAML list syntax. dataset_sources: ~ - - # A list of terms and conditions that users of the dataset should agree on, - # using the YAML list syntax. terms_of_use: ~ resources: - # A list of Google Cloud resources needed by your dataset. In principle, all - # pipelines under a dataset should be able to share these resources. 
- - type: bigquery_dataset - dataset_id: world_bank_intl_debt description: "World Bank Intl Debt" diff --git a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml index 573d546d0..13821a783 100644 --- a/datasets/world_bank_intl_debt/series_summary/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_summary/pipeline.yaml @@ -17,7 +17,6 @@ resources: - type: bigquery_table table_id: series_summary - description: "Series Summary table" dag: @@ -26,7 +25,6 @@ dag: dag_id: series_summary default_args: owner: "Google" - depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 @@ -36,20 +34,12 @@ dag: tasks: - operator: "KubernetesPodOperator" - - description: "Run CSV transform within kubernetes pod" - args: - task_id: "series_summary_transform_csv" - startup_timeout_seconds: 600 - name: "series_summary" - namespace: "default" - affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -59,12 +49,8 @@ dag: operator: In values: - "pool-e2-standard-4" - - image_pull_policy: "Always" - image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries.csv" SOURCE_FILE: "files/data.csv" @@ -77,7 +63,6 @@ dag: ["series_code" ,"topic" ,"indicator_name" ,"short_definition" ,"long_definition" ,"unit_of_measure" ,"periodicity" ,"base_period" ,"other_notes" ,"aggregation_method" ,"limitations_and_exceptions" ,"notes_from_original_source" ,"general_comments" ,"source" ,"statistical_concept_and_methodology" ,"development_relevance" ,"related_source_links" ,"other_web_links" ,"related_indicators" ,"license_type"] RENAME_MAPPINGS: >- {"Series Code":"series_code" ,"Topic":"topic" ,"Indicator Name":"indicator_name" ,"Short definition":"short_definition" ,"Long definition":"long_definition" ,"Unit of measure":"unit_of_measure" ,"Periodicity":"periodicity" ,"Base Period":"base_period" ,"Other 
notes":"other_notes" ,"Aggregation method":"aggregation_method" ,"Limitations and exceptions":"limitations_and_exceptions" ,"Notes from original source":"notes_from_original_source" ,"General comments":"general_comments" ,"Source":"source" ,"Statistical concept and methodology":"statistical_concept_and_methodology" ,"Development relevance":"development_relevance" ,"Related source links":"related_source_links" ,"Other web links":"other_web_links" ,"Related indicators":"related_indicators" ,"License Type":"license_type"} - resources: request_memory: "2G" request_cpu: "1" @@ -87,16 +72,12 @@ dag: args: task_id: "load_series_summary_to_bq" - bucket: "{{ var.value.composer_bucket }}" - source_objects: ["data/world_bank_intl_debt/series_summary/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.series_summary" - skip_leading_rows: 1 allow_quoted_newlines: True - write_disposition: "WRITE_TRUNCATE" schema_fields: @@ -161,6 +142,5 @@ dag: type: "string" mode: "nullable" - graph_paths: - "series_summary_transform_csv >> load_series_summary_to_bq" diff --git a/datasets/world_bank_intl_debt/series_times/pipeline.yaml b/datasets/world_bank_intl_debt/series_times/pipeline.yaml index 99b800911..2044090c8 100644 --- a/datasets/world_bank_intl_debt/series_times/pipeline.yaml +++ b/datasets/world_bank_intl_debt/series_times/pipeline.yaml @@ -17,7 +17,6 @@ resources: - type: bigquery_table table_id: series_times - description: "Series Times table" dag: @@ -26,29 +25,20 @@ dag: dag_id: series_times default_args: owner: "Google" - depends_on_past: False start_date: "2021-03-01" max_active_runs: 1 schedule_interval: "@daily" catchup: False default_view: graph - tasks: - operator: "KubernetesPodOperator" - description: "Run CSV transform within kubernetes pod" - args: - task_id: "series_times_transform_csv" - startup_timeout_seconds: 600 - name: "series_times" - namespace: "default" - affinity: nodeAffinity: 
requiredDuringSchedulingIgnoredDuringExecution: @@ -58,12 +48,8 @@ dag: operator: In values: - "pool-e2-standard-4" - - image_pull_policy: "Always" - image: "{{ var.json.world_bank_intl_debt.container_registry.run_csv_transform_kub }}" - env_vars: SOURCE_URL: "gs://pdp-feeds-staging/RelayWorldBank/IDS_CSV/IDSSeries-Time.csv" SOURCE_FILE: "files/data.csv" @@ -82,16 +68,12 @@ dag: - operator: "GoogleCloudStorageToBigQueryOperator" description: "Task to load CSV data to a BigQuery table" - args: task_id: "load_series_times_to_bq" - bucket: "{{ var.value.composer_bucket }}" - source_objects: ["data/world_bank_intl_debt/series_times/data_output.csv"] source_format: "CSV" destination_project_dataset_table: "world_bank_intl_debt.series_times" - skip_leading_rows: 1 write_disposition: "WRITE_TRUNCATE" @@ -106,6 +88,5 @@ dag: type: "string" mode: "nullable" - graph_paths: - "series_times_transform_csv >> load_series_times_to_bq"