Fix for enhancement#1003: Google Code-in Task to add a command for di… #256

Open · wants to merge 10 commits into base: master
13 changes: 13 additions & 0 deletions evalai/submissions.py
@@ -19,6 +19,7 @@
from evalai.utils.submissions import (
    display_submission_details,
    display_submission_result,
    display_submission_stderr,
    convert_bytes_to,
)
from evalai.utils.urls import URLS
@@ -63,6 +64,18 @@ def result(ctx):
    display_submission_result(ctx.submission_id)


@submission.command()
@click.pass_obj
def stderr(ctx):
    """
    Display the submission stderr in terminal output.

    Invoked by `evalai submission SUBMISSION_ID stderr`.
    """
    display_submission_stderr(ctx.submission_id)


@click.command()
@click.argument("IMAGE", nargs=1)
@click.option(
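For context, the new subcommand depends on the `submission` group parsing SUBMISSION_ID and storing it on the Click context object, which `@click.pass_obj` then hands to `stderr`. The following standalone sketch is not the actual evalai source; `SubmissionContext` is a hypothetical stand-in for whatever object evalai places on `ctx.obj`.

# Standalone sketch of the Click group/subcommand wiring assumed by the new
# `stderr` command; SubmissionContext is illustrative, not the evalai class.
import click


class SubmissionContext:
    def __init__(self, submission_id):
        self.submission_id = submission_id


@click.group()
@click.argument("SUBMISSION_ID", type=int)
@click.pass_context
def submission(ctx, submission_id):
    # The group parses the id once and shares it with every subcommand.
    ctx.obj = SubmissionContext(submission_id)


@submission.command()
@click.pass_obj
def stderr(obj):
    # The real command would call display_submission_stderr(obj.submission_id).
    click.echo("Would fetch stderr for submission {}".format(obj.submission_id))


if __name__ == "__main__":
    submission()

Invoking this sketch as `python sketch.py 48728 stderr` mirrors the intended `evalai submission 48728 stderr` flow.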
8 changes: 8 additions & 0 deletions evalai/utils/submissions.py
@@ -281,6 +281,14 @@ def display_submission_result(submission_id):
)


def display_submission_stderr(submission_id):
    """
    Display the stderr file of a particular submission in terminal output.
    """
    response = submission_details_request(submission_id).json()
    echo(requests.get(response["stderr_file"]).text)


def convert_bytes_to(byte, to, bsize=1024):
"""
Convert bytes to KB, MB, GB etc.
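One note on the helper above: in the fixture added below, a failed submission has "stderr_file": null, in which case requests.get(None) would raise. A hedged sketch of a defensive variant (an assumption, not part of this PR) could look like the following; submission_details_request is assumed to be the module's existing helper.

# Sketch of a defensive variant of display_submission_stderr (not the PR's
# implementation); it only adds a guard for a missing stderr_file URL.
import requests
from click import echo, style


def display_submission_stderr(submission_id):
    """
    Display the stderr file of a particular submission, if one exists.
    """
    # submission_details_request is assumed to be the helper already defined
    # in evalai/utils/submissions.py and used by the other display functions.
    response = submission_details_request(submission_id).json()
    stderr_url = response.get("stderr_file")
    if not stderr_url:
        echo(style("No stderr file is available for this submission.", bold=True))
        return
    echo(requests.get(stderr_url).text)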
47 changes: 46 additions & 1 deletion tests/data/submission_response.py
@@ -1,9 +1,29 @@
submission = """
{
"count": 4,
"count": 5,
"next": null,
"previous": null,
"results": [
{
"challenge_phase": 251,
"created_by": 5672,
"execution_time": 0.085137,
"id": 48728,
"input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt",
"is_public": false,
"method_description": null,
"method_name": null,
"participant_team": 3519,
"participant_team_name": "test",
"project_url": null,
"publication_url": null,
"status": "failed",
"stderr_file": null,
"stdout_file": null,
"submission_result_file": null,
"submitted_at": "2018-06-03T09:24:09.866590Z",
"when_made_public": null
},
{
"challenge_phase": 7,
"created_by": 4,
@@ -123,6 +143,31 @@
"when_made_public": null
}"""


submission_stderr_details = """
{
"challenge_phase": 251,
"created_by": 5672,
"execution_time": 0.085137,
"id": 48728,
"input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-\
ac19-409d-a97d-7240ea336a0c.txt",
"is_public": false,
"method_description": null,
"method_name": null,
"participant_team": 3519,
"participant_team_name": "test",
"project_url": null,
"publication_url": null,
"status": "submitted",
"stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-8f86-4757-9c93-bf0b26c1a3c2.txt",
"stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-e078-4b95-b041-83801a430874.txt",
"submission_result_file": null,
"submitted_at": "2018-06-03T09:24:09.866590Z",
"when_made_public": null
}"""


aws_credentials = """
{
"success": {
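A small sanity check on the new fixture: the trailing backslash inside the triple-quoted string joins the two physical source lines, so input_file parses as one unbroken URL. The import path below is an assumption based on how tests/test_submissions.py references the fixture.

# Illustrative check that the fixture parses as expected; the import path
# tests.data.submission_response is assumed, not confirmed by this diff.
import json

from tests.data import submission_response

details = json.loads(submission_response.submission_stderr_details)
assert details["stderr_file"].startswith("https://evalai.s3.amazonaws.com/")
print(details["input_file"])  # one uninterrupted URL despite the source-line break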
51 changes: 51 additions & 0 deletions tests/test_submissions.py
@@ -290,3 +290,54 @@ def test_make_submission_for_docker_based_challenge(
],
)
assert result.exit_code == 0


class TestDisplaySubmissionStderr(BaseTestClass):
    def setup(self):
        self.submission = json.loads(submission_response.submission_stderr_details)

        url = "{}{}"
        responses.add(
            responses.GET,
            url.format(API_HOST_URL, URLS.get_submission.value).format("48728"),
            json=self.submission,
            status=200,
        )

        responses.add(
            responses.GET,
            self.submission["stderr_file"],
            json=json.loads(submission_response.submission_stderr_details),
            status=200,
        )

    @responses.activate
    def test_display_submission_stderr_with_a_string_argument(self):
        expected = (
            "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n"
            '\nError: Invalid value for "SUBMISSION_ID": four is not a valid integer\n'
        )
        runner = CliRunner()
        result = runner.invoke(submission, ["four"])
        response = result.output
        assert response == expected

    @responses.activate
    def test_display_submission_stderr_with_no_argument(self):
        expected = (
            "Usage: submission [OPTIONS] SUBMISSION_ID COMMAND [ARGS]...\n"
            '\nError: Missing argument "SUBMISSION_ID".\n'
        )
        runner = CliRunner()
        result = runner.invoke(submission)
        response = result.output
        assert response == expected

    @responses.activate
    def test_display_submission_stderr_details(self):
        # The stderr_file URL is mocked with json=..., so the command echoes
        # that payload as serialized by the responses library.
        expected = json.dumps(self.submission)
        runner = CliRunner()
        result = runner.invoke(submission, ["48728", "stderr"])
        response = result.output.strip()
        assert response == expected
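A possible follow-up for the last test (an assumption, not part of this PR): registering the stderr_file URL with a plain-text body instead of json= makes the expected terminal output explicit. The method below would be appended to TestDisplaySubmissionStderr and assumes the setup() registrations remain visible inside the activated mock.

    # Sketch of an additional test method (hypothetical name); responses.replace
    # swaps the stderr_file registration from setup() for a plain-text body.
    @responses.activate
    def test_display_submission_stderr_plain_text_body(self):
        stderr_text = "Traceback (most recent call last): ..."
        responses.replace(
            responses.GET,
            self.submission["stderr_file"],
            body=stderr_text,
            status=200,
        )
        runner = CliRunner()
        result = runner.invoke(submission, ["48728", "stderr"])
        assert result.output.strip() == stderr_text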