Fix for enhancement#1003: Google Code-in Task to add a command for di… #256

Open · wants to merge 10 commits into base: master
13 changes: 13 additions & 0 deletions evalai/submissions.py
@@ -19,6 +19,7 @@
from evalai.utils.submissions import (
    display_submission_details,
    display_submission_result,
    display_submission_stderr,
    convert_bytes_to,
)
from evalai.utils.urls import URLS
@@ -63,6 +64,18 @@ def result(ctx):
    display_submission_result(ctx.submission_id)


@submission.command()
@click.pass_obj
def stderr(ctx):
"""
Display the submission stderr in Terminal output
"""
"""
Invoked by `evalai submission SUBMISSION_ID stterr`.
"""
display_submission_stderr(ctx.submission_id)


@click.command()
@click.argument("IMAGE", nargs=1)
@click.option(
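For reference, the new subcommand can be exercised the same way the tests below exercise it, via Click's test runner. A minimal sketch, assuming the submission group is importable from evalai.submissions as the tests do (the ID 48728 comes from this PR's test fixtures):

from click.testing import CliRunner

from evalai.submissions import submission

# Invoke the new `stderr` subcommand; equivalent to running
# `evalai submission 48728 stderr` from the shell.
runner = CliRunner()
result = runner.invoke(submission, ["48728", "stderr"])
print(result.output)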
24 changes: 24 additions & 0 deletions evalai/utils/submissions.py
@@ -281,6 +281,30 @@ def display_submission_result(submission_id):
)


def display_submission_stderr(submission_id):
    """
    Display the stderr file of a particular submission in the terminal.
    """
    try:
        response = submission_details_request(submission_id).json()
        # The stderr file is fetched over HTTP; the response body is the
        # file's contents, so echo it directly.
        file_contents = requests.get(response["stderr_file"]).text
        echo(file_contents)
    except requests.exceptions.MissingSchema:
        echo(
            style(
                "\nThe Submission is yet to be evaluated.\n",
                bold=True,
                fg="yellow",
            )
        )
Member:

Can you explain why you have used requests.exceptions.MissingSchema? Going by the docs, MissingSchema is raised when the URL schema (e.g. http or https) is missing.

Author:

@vkartik97, I used this exception because, as per your earlier note on my previous task ("display stdout message in terminal"), the stdout file is generated only after the submission evaluation is complete. So if the file URL is not available, the submission is presumably "yet to be evaluated", and this seemed like the right exception to catch. I followed the same pattern as the display_submission_result function in submissions.py. If you don't want this exception shown, let me know which exception you would suggest instead. Thank you.
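For context, a quick illustration of the behavior being described: when stderr_file is null in the payload, requests.get(None) stringifies the URL and raises MissingSchema, since "None" carries no http/https scheme. A sketch (the exact message wording varies across requests versions):

import requests

try:
    requests.get(None)  # what happens when response["stderr_file"] is null
except requests.exceptions.MissingSchema as e:
    # e.g. Invalid URL 'None': No schema supplied.
    print(e)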

Member:

You can use another exception, or use the status data from the payload.
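A minimal sketch of that suggestion, guarding on the payload instead of catching MissingSchema. The field name stderr_file follows this PR's code and fixtures, but the guard itself is an illustration, not the PR's implementation (it relies on the module's existing requests, echo, style, and submission_details_request imports):

def display_submission_stderr_guarded(submission_id):
    """
    Display a submission's stderr, checking the payload before fetching.
    """
    response = submission_details_request(submission_id).json()
    # stderr_file stays null until evaluation has produced it, so check the
    # payload rather than letting requests.get(None) raise MissingSchema.
    if not response.get("stderr_file"):
        echo(
            style(
                "\nThe Submission is yet to be evaluated.\n",
                bold=True,
                fg="yellow",
            )
        )
        return
    echo(requests.get(response["stderr_file"]).text)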



def convert_bytes_to(byte, to, bsize=1024):
"""
Convert bytes to KB, MB, GB etc.
49 changes: 48 additions & 1 deletion tests/data/submission_response.py
@@ -1,9 +1,29 @@
submission = """
{
"count": 4,
"count": 5,
"next": null,
"previous": null,
"results": [
{
"challenge_phase": 251,
"created_by": 5672,
"execution_time": 0.085137,
"id": 48728,
"input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt",
"is_public": false,
"method_description": null,
"method_name": null,
"participant_team": 3519,
"participant_team_name": "test",
"project_url": null,
"publication_url": null,
"status": "failed",
"stderr_file": null,
"stdout_file": null,
"submission_result_file": null,
"submitted_at": "2018-06-03T09:24:09.866590Z",
"when_made_public": null
},
{
"challenge_phase": 7,
"created_by": 4,
@@ -123,6 +143,33 @@
"when_made_public": null
}"""


submission_stderr_details = """
{
    "challenge_phase": 251,
    "created_by": 5672,
    "execution_time": 0.085137,
    "id": 48728,
    "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-\
ac19-409d-a97d-7240ea336a0c.txt",
    "is_public": false,
    "method_description": null,
    "method_name": null,
    "participant_team": 3519,
    "participant_team_name": "test",
    "project_url": null,
    "publication_url": null,
    "status": "submitted",
    "stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-\
8f86-4757-9c93-bf0b26c1a3c2.txt",
    "stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-\
e078-4b95-b041-83801a430874.txt",
    "submission_result_file": null,
    "submitted_at": "2018-06-03T09:24:09.866590Z",
    "when_made_public": null
}"""


aws_credentials = """
{
"success": {
50 changes: 50 additions & 0 deletions tests/test_submissions.py
@@ -290,3 +290,53 @@ def test_make_submission_for_docker_based_challenge(
],
)
assert result.exit_code == 0


class TestDisplaySubmissionStderr(BaseTestClass):
    def setup(self):
        self.submission = json.loads(submission_response.submission_stderr_details)

        url = "{}{}"
        responses.add(
            responses.GET,
            url.format(API_HOST_URL, URLS.get_submission.value).format("48728"),
            json=self.submission,
            status=200,
        )

        responses.add(
            responses.GET,
            self.submission["stderr_file"],
            json=json.loads(submission_response.submission_stderr_details),
            status=200,
        )

    @responses.activate
    def test_display_submission_stderr_with_a_string_argument(self):
        expected = (
            "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n"
            '\nError: Invalid value for "SUBMISSION_ID": four is not a valid integer\n'
        )
        runner = CliRunner()
        result = runner.invoke(submission, ["four"])
        response = result.output
        assert response == expected

    @responses.activate
    def test_display_submission_stderr_with_no_argument(self):
        expected = (
            "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n"
            '\nError: Missing argument "SUBMISSION_ID".\n'
        )
        runner = CliRunner()
        result = runner.invoke(submission)
        response = result.output
        assert response == expected

    @responses.activate
    def test_display_submission_stderr_details(self):
        expected = ""
        runner = CliRunner()
        result = runner.invoke(submission, ["48728", "stderr"])
        response = result.output.strip()
        assert response == expected