-
Notifications
You must be signed in to change notification settings - Fork 1
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
140 validation metadata #165
Changes from 9 commits
75e8af3
9bfb49d
85bac65
fd5ab4a
314e771
68d5114
ffc4a12
b7d304e
e0307cd
5c51999
232fe29
a8f7185
3c4d2a1
1e1bef2
b4481c7
aedd46b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -3,7 +3,7 @@ | |
|
||
from io import BytesIO | ||
from fastapi import UploadFile | ||
from regtech_data_validator.create_schemas import validate_phases | ||
from regtech_data_validator.create_schemas import validate_phases, ValidationPhase | ||
from regtech_data_validator.data_formatters import df_to_json, df_to_download | ||
from regtech_data_validator.checks import Severity | ||
import pandas as pd | ||
|
@@ -101,7 +101,7 @@ async def validate_and_update_submission(period_code: str, lei: str, submission: | |
) | ||
else: | ||
submission.state = SubmissionState.VALIDATION_SUCCESSFUL | ||
submission.validation_json = json.loads(df_to_json(result[1])) | ||
submission.validation_json = build_validation_results(result) | ||
submission_report = df_to_download(result[1]) | ||
await upload_to_storage( | ||
period_code, lei, str(submission.id) + REPORT_QUALIFIER, submission_report.encode("utf-8") | ||
|
@@ -112,7 +112,6 @@ async def validate_and_update_submission(period_code: str, lei: str, submission: | |
log.error("The file is malformed", re, exc_info=True, stack_info=True) | ||
submission.state = SubmissionState.SUBMISSION_UPLOAD_MALFORMED | ||
await update_submission(submission) | ||
|
||
except Exception as e: | ||
log.error( | ||
f"Validation for submission {submission.id} did not complete due to an unexpected error.", | ||
|
@@ -122,3 +121,27 @@ async def validate_and_update_submission(period_code: str, lei: str, submission: | |
) | ||
submission.state = SubmissionState.VALIDATION_ERROR | ||
await update_submission(submission) | ||
|
||
|
||
def build_validation_results(result):
    """Build the validation_json structure for a submission from a validation result.

    Args:
        result: tuple produced by ``validate_phases`` — ``result[1]`` is the
            findings DataFrame and ``result[2]`` is the validation phase name.

    Returns:
        dict: for the syntactical phase, a single ``"syntax_errors"`` section;
        otherwise ``"syntax_errors"`` (empty), ``"logic_errors"`` and
        ``"logic_warnings"`` sections. Each section holds a ``"count"`` and a
        ``"details"`` list of the individual findings.
    """
    val_json = json.loads(df_to_json(result[1]))

    if result[2] == ValidationPhase.SYNTACTICAL.value:
        # Syntactical phase: every finding is a syntax error. Build the nested
        # dict in one expression — assigning into val_res["syntax_errors"] on a
        # fresh empty dict would raise KeyError.
        val_res = {"syntax_errors": {"count": len(val_json), "details": val_json}}
    else:
        # Logical phase: split findings by severity in a single pass each.
        logic_errors = [v for v in val_json if v["validation"]["severity"] == Severity.ERROR.value]
        logic_warnings = [v for v in val_json if v["validation"]["severity"] == Severity.WARNING.value]
        val_res = {
            "syntax_errors": {"count": 0, "details": []},
            "logic_errors": {"count": len(logic_errors), "details": logic_errors},
            "logic_warnings": {"count": len(logic_warnings), "details": logic_warnings},
        }

    return val_res
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This else block can be further refined to build each details list with a comprehension,
to avoid looping and appending. List comprehension in Python is faster on large data sets. |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -47,9 +47,99 @@ def warning_submission_mock(mocker: MockerFixture, validate_submission_mock: Moc | |
|
||
|
||
@pytest.fixture(scope="function")
def validation_success_mock(mocker: MockerFixture, validate_submission_mock: Mock):
    """Patch build_validation_results to report a fully successful validation.

    Returns the mock so tests can inspect calls. The mocked return value is a
    dict — not a JSON string — to match the real function's return type, since
    the caller assigns it directly to ``submission.validation_json``.
    """
    mock_json_formatting = mocker.patch("sbl_filing_api.services.submission_processor.build_validation_results")
    mock_json_formatting.return_value = {
        "syntax_errors": {"count": 0, "details": []},
        "logic_errors": {"count": 0, "details": []},
        "logic_warnings": {"count": 0, "details": []},
    }
    return mock_json_formatting
|
||
|
||
@pytest.fixture(scope="function")
def validation_syntax_errors_mock(mocker: MockerFixture, validate_submission_mock: Mock):
    """Patch build_validation_results to report syntax errors only.

    Returns the mock so tests can inspect calls. The mocked return value is a
    dict — not a JSON string — to match the real function's return type, since
    the caller assigns it directly to ``submission.validation_json``.
    """
    mock_json_formatting = mocker.patch("sbl_filing_api.services.submission_processor.build_validation_results")
    mock_json_formatting.return_value = {
        "syntax_errors": {"count": 2, "details": []},
    }
    return mock_json_formatting
|
||
|
||
@pytest.fixture(scope="function")
def validation_logic_warnings_mock(mocker: MockerFixture, validate_submission_mock: Mock):
    """Patch build_validation_results to report logic warnings only.

    Returns the mock so tests can inspect calls. The mocked return value is a
    dict — not a JSON string — to match the real function's return type, since
    the caller assigns it directly to ``submission.validation_json``.
    """
    mock_json_formatting = mocker.patch("sbl_filing_api.services.submission_processor.build_validation_results")
    mock_json_formatting.return_value = {
        "syntax_errors": {"count": 0, "details": []},
        "logic_errors": {"count": 0, "details": []},
        "logic_warnings": {"count": 1, "details": []},
    }
    return mock_json_formatting
|
||
|
||
@pytest.fixture(scope="function")
def validation_logic_errors_mock(mocker: MockerFixture, validate_submission_mock: Mock):
    """Patch build_validation_results to report logic errors only.

    Returns the mock so tests can inspect calls. The mocked return value is a
    dict — not a JSON string — to match the real function's return type, since
    the caller assigns it directly to ``submission.validation_json``.
    """
    mock_json_formatting = mocker.patch("sbl_filing_api.services.submission_processor.build_validation_results")
    mock_json_formatting.return_value = {
        "syntax_errors": {"count": 0, "details": []},
        "logic_errors": {"count": 4, "details": []},
        "logic_warnings": {"count": 0, "details": []},
    }
    return mock_json_formatting
|
||
|
||
@pytest.fixture(scope="function")
def validation_logic_warnings_and_errors_mock(mocker: MockerFixture, validate_submission_mock: Mock):
    """Patch build_validation_results to report both logic warnings and errors.

    Returns the mock so tests can inspect calls. The mocked return value is a
    dict — not a JSON string — to match the real function's return type, since
    the caller assigns it directly to ``submission.validation_json``.
    """
    mock_json_formatting = mocker.patch("sbl_filing_api.services.submission_processor.build_validation_results")
    mock_json_formatting.return_value = {
        "syntax_errors": {"count": 0, "details": []},
        "logic_errors": {"count": 3, "details": []},
        "logic_warnings": {"count": 2, "details": []},
    }
    return mock_json_formatting
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Mocking out the return value of the method you are trying to test isn't productive; you are basically bypassing testing the method, which is why the test coverage remains unchanged. If you click on the link with the lines in the test coverage comment, it shows that the whole method is uncovered. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ideally, you would construct the dataframe to pass to the method for testing, but if that's too much of a lift, something you could mock, for example, could be |
||
|
||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Just like below, this section can be shortened to just:
val_res = {"syntax_errors": {"count": len(val_json), "details": val_json}}