
Commit e265135
Merge branch 'main' of github.com:VeinsOfTheEarth/rabpro
jsta committed Jan 26, 2022
2 parents 143842c + 0276f00 commit e265135
Showing 5 changed files with 24 additions and 18 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/build.yaml
@@ -9,6 +9,9 @@ on:
- "tests/**"
- .coveragerc
- setup.py
pull_request:
paths:
- "rabpro/**"

jobs:
build:
17 changes: 10 additions & 7 deletions rabpro/subbasin_stats.py
@@ -151,7 +151,7 @@ def main(
# Dictionary for determining which rasters and statistics to compute
control = _get_controls(dataset_list)
ee.Initialize()

# Create water occurence mask
occ_mask = ee.Image("JRC/GSW1_3/GlobalSurfaceWater").select("occurrence").lt(90)

@@ -168,6 +168,7 @@ def main(
featureCollection = ee.FeatureCollection(gee_feature_path)

# For each raster
datas, tasks = [], []
for d in control:
if d.band is None or d.band == "None":
if d.type == "image":
@@ -254,14 +255,16 @@ def remove_geometry(feat):
task.start()

if test:
return data, task
datas.append(data)
tasks.append(task)
else:
return (
datas.append(
table.getDownloadURL(
filetype="csv", filename=dataset_to_filename(d.data_id, d.band, tag)
),
task,
)
))
tasks.append(task)

return datas, tasks


def _parse_reducers(stats=None, base=None):
@@ -355,7 +358,7 @@ def _get_controls(datasets):
print(
f"Warning: requested end date later than expected for {d.data_id}:{d.band}"
)

d.stats = set(d.stats + ["count", "mean"])

if "no_data" in gee_dataset["bands"][d.band]:
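Note on the change above: main() in rabpro/subbasin_stats.py now collects one result per requested dataset and returns the parallel lists (datas, tasks) instead of a single (data, task) pair. A minimal usage sketch under that convention, with names borrowed from the updated tests (the import path, the Dataset class location, and the subbasins file are assumptions, not verified against the package API):

import geopandas as gpd
from rabpro.subbasin_stats import Dataset, main as basin_stats  # import path assumed

gdf = gpd.read_file("subbasins.geojson")  # hypothetical subbasin polygons

datas, tasks = basin_stats(
    [Dataset("JRC/GSW1_3/YearlyHistory", "waterClass")],
    sb_inc_gdf=gdf,
    test=True,
)

# One entry per requested Dataset: with test=True each entry is a
# GeoJSON-like dict plus its Earth Engine export task.
props = datas[0]["features"][0]["properties"]
state = tasks[0].status()["state"]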
2 changes: 1 addition & 1 deletion setup.py
@@ -5,7 +5,7 @@

setuptools.setup(
name="rabpro",
version="0.2.2",
version="0.3.0",
author="Example Author",
author_email="author@example.com",
description="Package to delineate subbasins and compute statistics.",
8 changes: 4 additions & 4 deletions tests/test.py
@@ -46,9 +46,9 @@ def stattest(self, stats, datasets, length=1):
data, task = self.rpo.basin_stats(datasets, folder="rabpro test", test=True)

# Only check one set of stats for time-series data, but check length is equal
self.assertEqual(len(data["features"]), length)
self.assertEqual(len(data[0]["features"]), length)

ret_stats = data["features"][0]["properties"]
ret_stats = data[0]["features"][0]["properties"]
print(f"Expected stats: {stats}")
print(f"Returned stats: {ret_stats}")

@@ -62,12 +62,12 @@ def stattest(self, stats, datasets, length=1):
)

for _ in range(12):
status = task.status()["state"]
status = task[0].status()["state"]
if status in ["READY", "RUNNING", "COMPLETED"]:
break
time.sleep(10)

self.assertTrue(task.status()["state"] in ["READY", "RUNNING", "COMPLETED"])
self.assertTrue(task[0].status()["state"] in ["READY", "RUNNING", "COMPLETED"])


class MERITTest(DataTestCase):
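The stattest changes above index the first returned task (task[0]) and poll its state in a bounded loop. The same pattern as a stand-alone helper, for illustration only (the function name and defaults are not part of rabpro):

import time

def wait_for_task(task, tries=12, delay=10):
    # Poll an Earth Engine export task until it reports queued, running, or
    # completed, mirroring the loop in tests/test.py.
    for _ in range(tries):
        state = task.status()["state"]
        if state in ["READY", "RUNNING", "COMPLETED"]:
            return state
        time.sleep(delay)
    return task.status()["state"]

# e.g. assert wait_for_task(tasks[0]) in ["READY", "RUNNING", "COMPLETED"]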
12 changes: 6 additions & 6 deletions tests/test_basin_stats.py
@@ -45,7 +45,7 @@ def asdf(feat):
test=True,
)

res = pd.concat([clean_res(feature) for feature in data["features"]])
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])

assert all(res["asdf"] == res["max"])

@@ -58,7 +58,7 @@ def test_categorical_imgcol():
)

res = pd.concat(
[clean_freqhist(feature, "LC_Type1") for feature in data["features"]]
[clean_freqhist(feature, "LC_Type1") for feature in data[0]["features"]]
)

assert res.shape[1] > 4
@@ -70,7 +70,7 @@ def test_timeindexed_imgcol():
[Dataset("JRC/GSW1_3/YearlyHistory", "waterClass",)], sb_inc_gdf=gdf, test=True,
)

res = pd.concat([clean_res(feature) for feature in data["features"]])
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])

assert res["mean"].iloc[0] > 0
assert res.shape[0] > 0
@@ -91,7 +91,7 @@ def test_timeindexedspecific_imgcol():
test=True,
)

res = pd.concat([clean_res(feature) for feature in data["features"]])
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])

assert res.shape[0] == 2

@@ -104,7 +104,7 @@ def test_nontimeindexed_imgcol():
test=True,
)

res = pd.concat([clean_res(feature) for feature in data["features"]])
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])

assert res.shape[0] > 0

@@ -123,7 +123,7 @@ def test_img():
test=True,
)

res = pd.DataFrame(data["features"][0]["properties"], index=[0])
res = pd.DataFrame(data[0]["features"][0]["properties"], index=[0])

assert float(res["mean"]) > 0
assert res.shape[1] == 9
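
clean_res and clean_freqhist are test helpers defined elsewhere in tests/test_basin_stats.py and are not part of this diff. A minimal stand-in consistent with how clean_res is used above, assuming it simply flattens a feature's properties into one DataFrame row (as test_img does directly):

import pandas as pd

def clean_res(feature):
    # One row per GeoJSON-like feature, built from its "properties" dict,
    # matching the pd.DataFrame(..., index=[0]) pattern in test_img().
    return pd.DataFrame(feature["properties"], index=[0])

# e.g. res = pd.concat([clean_res(f) for f in datas[0]["features"]])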
