Commit
fix: Now uses chunksize :)
nlarge-google committed Oct 7, 2021
1 parent a17651d commit f16a7fe
Showing 3 changed files with 102 additions and 67 deletions.
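
For orientation, here is a minimal, hedged sketch of the batching pattern this commit introduces: pandas reads the source CSV in fixed-size chunks instead of loading the whole file, and each transformed chunk is appended to a single output file. The names and the per-chunk cleanup step below are placeholders, not the pipeline's real identifiers.

import pandas as pd

def process_in_chunks(source_csv: str, target_csv: str, chunksize: int) -> None:
    # chunksize turns read_csv into an iterator; pandas >= 1.2 also lets it be
    # used as a context manager, which is what the new main() below relies on
    with pd.read_csv(source_csv, chunksize=chunksize) as reader:
        for chunk_number, chunk in enumerate(reader):
            chunk = chunk.dropna(how="all")  # stand-in for the real per-chunk transforms
            # write the header only for the first batch; append later batches header-less
            chunk.to_csv(
                target_csv,
                mode="w" if chunk_number == 0 else "a",
                header=(chunk_number == 0),
                index=False,
            )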
@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import json
import logging
import os
import pathlib
from shutil import copyfile

import pandas as pd
import requests
@@ -28,40 +26,86 @@ def main(
source_url_json: str,
source_file: pathlib.Path,
target_file: pathlib.Path,
chunksize: str,
target_gcs_bucket: str,
target_gcs_path: str,
) -> None:

logging.info("San Francisco Bikeshare Stations process started")

logging.info("creating 'files' folder")
pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
logging.info("creating 'templates' folder")
pathlib.Path("./templates").mkdir(parents=True, exist_ok=True)

logging.info(f"Extracting URL for stations: {source_url_json}")
source_file_stations_csv = str(source_file).replace(".csv", "") + "_stations.csv"
source_file_stations_json = str(source_file).replace(".csv", "") + "_stations.json"
download_file_json(source_url_json, source_file_stations_json, source_file)

chunksz = int(chunksize)

logging.info(f"Opening batch file {source_file}")
with pd.read_csv(
source_file, # path to main source file to load in batches
engine="python",
encoding="utf-8",
quotechar='"', # string separator, typically double-quotes
chunksize=chunksz, # size of batch data, in no. of records
sep=",", # data column separator, typically ","
) as reader:
for chunk_number, chunk in enumerate(reader):
target_file_batch = str(target_file).replace(
".csv", "-" + str(chunk_number) + ".csv"
)
df = pd.DataFrame()
df = pd.concat([df, chunk])
process_chunk(df, target_file_batch, target_file, (not chunk_number == 0))

logging.info(f"Downloading stations json file {source_url_json}")
download_file_json(
source_url_json, source_file_stations_json, source_file_stations_csv
)
copyfile(source_file_stations_json, "./templates/bikeshare_stations.json")
upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)

logging.info("San Francisco Bikeshare Stations process completed")


def process_chunk(
df: pd.DataFrame, target_file_batch: str, target_file: str, skip_header: bool
) -> None:
df = rename_headers(df)
df = filter_empty_data(df)
df = generate_location(df)
df = resolve_datatypes(df)
df = reorder_headers(df)
save_to_new_file(df, file_path=str(target_file_batch))
append_batch_file(target_file_batch, target_file, skip_header, not (skip_header))


def rename_headers(df: pd.DataFrame) -> None:
logging.info("Renaming Headers")
header_names = {
"data.stations.station_id": "station_id",
"data.stations.name": "name",
"data.stations.short_name": "short_name",
"data.stations.lat": "lat",
"data.stations.lon": "lon",
"data.stations.region_id": "region_id",
"data.stations.rental_methods": "rental_methods",
"data.stations.capacity": "capacity",
"data.stations.eightd_has_key_dispenser": "eightd_has_key_dispenser",
"data.stations.has_kiosk": "has_kiosk",
"data.stations.external_id": "external_id",
}

logging.info(f"Opening stations file {source_file_stations_csv}")
df = pd.read_csv(source_file_stations_csv)
df.rename(columns=header_names)

logging.info(f"Transformation Process Starting.. {source_file}")
return df

logging.info(f"Renaming Columns {source_file_stations_csv}")
rename_headers(df)

def filter_empty_data(df: pd.DataFrame) -> pd.DataFrame:
logging.info("Filter rows with empty key data")
df = df[df["station_id"] != ""]
df = df[df["name"] != ""]
df = df[df["lat"] != ""]
df = df[df["lon"] != ""]

return df


def generate_location(df: pd.DataFrame) -> pd.DataFrame:
logging.info("Generating location data")
df["station_geom"] = (
"POINT("
+ df["lon"][:].astype("string")
@@ -70,9 +114,18 @@ def main(
+ ")"
)

return df


def resolve_datatypes(df: pd.DataFrame) -> pd.DataFrame:
logging.info("Resolving datatypes")
df["region_id"] = df["region_id"].astype("Int64")

logging.info("Re-ordering Headers")
return df


def reorder_headers(df: pd.DataFrame) -> pd.DataFrame:
logging.info("Reordering Headers")
df = df[
[
"station_id",
@@ -90,47 +143,28 @@ def main(
]
]

logging.info(f"Transformation Process complete .. {source_file}")

logging.info(f"Saving to output file.. {target_file}")

try:
save_to_new_file(df, file_path=str(target_file))
except Exception as e:
logging.error(f"Error saving output file: {e}.")

logging.info(
f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}"
)
upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)

logging.info("San Francisco Bikeshare Stations process completed")
return df


def datetime_from_int(dt_int: int) -> str:
return datetime.datetime.fromtimestamp(dt_int).strftime("%Y-%m-%d %H:%M:%S")


def convert_dt_format(date_str: str, time_str: str) -> str:
return str(datetime.datetime.strptime(date_str, "%m/%d/%Y").date()) + " " + time_str


def rename_headers(df: pd.DataFrame) -> None:
header_names = {
"data.stations.station_id": "station_id",
"data.stations.name": "name",
"data.stations.short_name": "short_name",
"data.stations.lat": "lat",
"data.stations.lon": "lon",
"data.stations.region_id": "region_id",
"data.stations.rental_methods": "rental_methods",
"data.stations.capacity": "capacity",
"data.stations.eightd_has_key_dispenser": "eightd_has_key_dispenser",
"data.stations.has_kiosk": "has_kiosk",
"data.stations.external_id": "external_id",
}

df.rename(columns=header_names, inplace=True)
def append_batch_file(
batch_file_path: str, target_file_path: str, skip_header: bool, truncate_file: bool
) -> None:
data_file = open(batch_file_path, "r")
if truncate_file:
target_file = open(target_file_path, "w+").close()
target_file = open(target_file_path, "a+")
if skip_header:
logging.info(
f"Appending batch file {batch_file_path} to {target_file_path} with skip header"
)
next(data_file)
else:
logging.info(f"Appending batch file {batch_file_path} to {target_file_path}")
target_file.write(data_file.read())
data_file.close()
target_file.close()
if os.path.exists(batch_file_path):
os.remove(batch_file_path)
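
To make the flag handling above concrete: in process_chunk, truncate_file is passed as not skip_header, so the first batch recreates the target file and keeps its header row, while every later batch is appended with the header skipped. A short illustration with hypothetical file names:

# chunk 0: skip_header=False, truncate_file=True -> target is recreated, header kept
append_batch_file("data_output-0.csv", "data_output.csv", skip_header=False, truncate_file=True)
# chunks 1..n: skip_header=True, truncate_file=False -> rows appended, header dropped
append_batch_file("data_output-1.csv", "data_output.csv", skip_header=True, truncate_file=False)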


def save_to_new_file(df, file_path) -> None:
@@ -143,17 +177,15 @@ def download_file_json(

# this function extracts the json from a source url and creates
# a csv file from that data to be used as an input file
logging.info(f"Downloading stations json file {source_url_json}")

# download json url into object r
r = requests.get(source_url_json + ".json", stream=True)

# push object r (json) into json file
try:
with open(source_file_json, "wb") as f:
for chunk in r:
f.write(chunk)
except ValueError:
print(f"Writing JSON to {source_file_json} has failed")
with open(source_file_json, "wb") as f:
for chunk in r:
f.write(chunk)

f = open(
source_file_json.strip(),
@@ -177,6 +209,7 @@ def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str)
source_url_json=os.environ["SOURCE_URL_JSON"],
source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(),
target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(),
chunksize=os.environ["CHUNKSIZE"],
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
target_gcs_path=os.environ["TARGET_GCS_PATH"],
)
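
The dotted "data.stations.*" names handled by rename_headers suggest that download_file_json flattens the GBFS station_information feed into a CSV before the chunked read. The function body is partly collapsed above, so the following is only a sketch under that assumption, using pandas.json_normalize and hypothetical paths:

import json

import pandas as pd

# Assumed feed shape: {"data": {"stations": [ {...}, {...} ]}}
with open("files/data_stations.json") as f:
    payload = json.load(f)

stations = pd.json_normalize(payload["data"]["stations"], sep=".")
stations = stations.add_prefix("data.stations.")  # e.g. "data.stations.station_id"
stations.to_csv("files/data_stations.csv", index=False)  # input for the chunked read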
@@ -61,10 +61,11 @@
"SOURCE_URL_JSON": "https://gbfs.baywheels.com/gbfs/fr/station_information",
"SOURCE_FILE": "files/data.csv",
"TARGET_FILE": "files/data_output.csv",
"CHUNKSIZE": "750000",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/san_francisco_bikeshare_stations/bikeshare_stations/data_output.csv",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
resources={"limit_memory": "8G", "limit_cpu": "3"},
)

# Task to load CSV data to a BigQuery table
@@ -57,11 +57,12 @@ dag:
SOURCE_URL_JSON: "https://gbfs.baywheels.com/gbfs/fr/station_information"
SOURCE_FILE: "files/data.csv"
TARGET_FILE: "files/data_output.csv"
CHUNKSIZE: "750000"
TARGET_GCS_BUCKET: "{{ var.value.composer_bucket }}"
TARGET_GCS_PATH: "data/san_francisco_bikeshare_stations/bikeshare_stations/data_output.csv"
resources:
limit_memory: "2G"
limit_cpu: "1"
limit_memory: "8G"
limit_cpu: "3"

- operator: "GoogleCloudStorageToBigQueryOperator"
description: "Task to load CSV data to a BigQuery table"
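
Both the DAG and the YAML config now pass CHUNKSIZE into the pod as a string environment variable (alongside the raised memory/CPU limits), and the transform converts it with int(chunksize). A small, hedged sketch of that hand-off on the container side; the fallback value is an assumption, not part of the pipeline:

import os

# env_vars from the KubernetesPodOperator surface as process environment variables
chunksize = int(os.environ.get("CHUNKSIZE", "750000"))  # assumed default if unset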