
docs(samples): added sample and tests for annotate assessment API #155

Merged
merged 9 commits on Nov 16, 2021
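This PR adds a new sample, samples/snippets/annotate_assessment.py, which sends annotation feedback for an existing assessment; changes create_assessment() to return the Assessment so its ID can be reused; and reworks the test to cover the full create-then-annotate flow. A minimal sketch of how the two samples are meant to chain together (the project ID, site key, token, and action values below are placeholders, not part of this PR):

# Sketch only: chain the two samples touched by this PR.
# "my-project-id", "my-site-key", "token-from-client", and "homepage" are placeholder values.
from annotate_assessment import annotate_assessment
from create_assessment import create_assessment

assessment = create_assessment(
    project_id="my-project-id",
    recaptcha_site_key="my-site-key",
    token="token-from-client",
    recaptcha_action="homepage",
)

# assessment.name has the form projects/{project}/assessments/{assessment_id};
# annotate_assessment() expects only the trailing assessment_id.
assessment_id = assessment.name.rsplit("/", maxsplit=1)[-1]
annotate_assessment(project_id="my-project-id", assessment_id=assessment_id)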
43 changes: 43 additions & 0 deletions samples/snippets/annotate_assessment.py
@@ -0,0 +1,43 @@
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START recaptcha_enterprise_annotate_assessment]
from google.cloud import recaptchaenterprise_v1


def annotate_assessment(project_id: str, assessment_id: str) -> None:
""" Pre-requisite: Create an assessment before annotating.
Annotate an assessment to provide feedback on the correctness of recaptcha prediction.
Args:
project_id: Google Cloud Project ID
assessment_id: Value of the 'name' field returned from the create_assessment() call.
"""

client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()

assessment_name = f"projects/{project_id}/assessments/{assessment_id}"
# Build the annotation request.
# For more info on when/how to annotate, see:
# https://cloud.google.com/recaptcha-enterprise/docs/annotate-assessment#when_to_annotate
request = recaptchaenterprise_v1.AnnotateAssessmentRequest()
request.name = assessment_name
request.annotation = request.Annotation.FRAUDULENT
request.reasons = [request.Reason.FAILED_TWO_FACTOR]

# Empty response is sent back.
client.annotate_assessment(request)
print("Annotated response sent successfully ! ")


# [END recaptcha_enterprise_annotate_assessment]
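The sample above hardcodes the FRAUDULENT annotation with a FAILED_TWO_FACTOR reason; reporting a legitimate event only swaps the enum values. A sketch (not part of this PR), reusing client and assessment_name from the sample:

# Sketch only: annotate the same assessment as legitimate instead of fraudulent.
# LEGITIMATE and PASSED_TWO_FACTOR are other values of the same nested enums.
request = recaptchaenterprise_v1.AnnotateAssessmentRequest()
request.name = assessment_name
request.annotation = request.Annotation.LEGITIMATE
request.reasons = [request.Reason.PASSED_TWO_FACTOR]
client.annotate_assessment(request)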
8 changes: 7 additions & 1 deletion samples/snippets/create_assessment.py
@@ -13,12 +13,14 @@
# limitations under the License.

# [START recaptcha_enterprise_create_assessment]

from google.cloud import recaptchaenterprise_v1
from google.cloud.recaptchaenterprise_v1 import Assessment


def create_assessment(
project_id: str, recaptcha_site_key: str, token: str, recaptcha_action: str
) -> None:
) -> Assessment:
""" Create an assessment to analyze the risk of a UI action.
Args:
project_id: GCloud Project ID
@@ -72,6 +74,10 @@ def create_assessment(
"The reCAPTCHA score for this token is: "
+ str(response.risk_analysis.score)
)
# Get the assessment name (id). Use this to annotate the assessment.
assessment_name = client.parse_assessment_path(response.name).get("assessment")
print(f"Assessment name: {assessment_name}")
return response


# [END recaptcha_enterprise_create_assessment]
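With create_assessment() now returning the Assessment instead of None, callers (including the test below) can read fields off the response directly rather than parsing printed output. A small sketch, assuming project_id, recaptcha_site_key, token, and recaptcha_action are already defined:

# Sketch only: use the returned Assessment directly.
assessment = create_assessment(project_id, recaptcha_site_key, token, recaptcha_action)
print(assessment.risk_analysis.score)      # risk score between 0.0 and 1.0
print(assessment.token_properties.valid)   # whether the supplied token was valid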
39 changes: 28 additions & 11 deletions samples/snippets/test_create_assessment.py
@@ -11,26 +11,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import re
import time
import typing

from _pytest.capture import CaptureFixture
from flask import Flask, render_template, url_for
from google.cloud import recaptchaenterprise_v1
from google.cloud.recaptchaenterprise_v1 import Assessment
import pytest

from selenium import webdriver
from selenium.webdriver.chrome.webdriver import WebDriver

import create_assessment

# TODO(developer): Replace these variables before running the sample.
from annotate_assessment import annotate_assessment
from create_assessment import create_assessment
from create_site_key import create_site_key
from delete_site_key import delete_site_key


GOOGLE_CLOUD_PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
DOMAIN_NAME = "localhost"
# Switch the multi-processing style for Python > 3.7: https://github.com/pytest-dev/pytest-flask/issues/104
multiprocessing.set_start_method("fork")


@pytest.fixture(scope="session")
@@ -72,16 +77,28 @@ def recaptcha_site_key() -> str:


@pytest.mark.usefixtures("live_server")
def test_create_assessment(
def test_assessment(
capsys: CaptureFixture, recaptcha_site_key: str, browser: WebDriver
) -> None:
# Get token.
token, action = get_token(recaptcha_site_key, browser)
assess_token(recaptcha_site_key, token=token, action=action)
out, _ = capsys.readouterr()
assert re.search("The reCAPTCHA score for this token is: ", out)
score = out.rsplit(":", maxsplit=1)[1].strip()
# Create assessment.
assessment_response = assess_token(recaptcha_site_key, token=token, action=action)
score = str(assessment_response.risk_analysis.score)
client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceClient()
    # Parse assessment_response.name with parse_assessment_path(), which returns a dict
    # of the form: {'project': 'my-project-id', 'assessment': 'assessment-id'}
assessment_name = client.parse_assessment_path(assessment_response.name).get(
"assessment"
)
assert assessment_name != ""
set_score(browser, score)

# Annotate assessment.
annotate_assessment(project_id=GOOGLE_CLOUD_PROJECT, assessment_id=assessment_name)
out, _ = capsys.readouterr()
assert re.search("Annotated response sent successfully !", out)


def get_token(recaptcha_site_key: str, browser: WebDriver) -> typing.Tuple:
browser.get(url_for("assess", site_key=recaptcha_site_key, _external=True))
@@ -100,8 +117,8 @@ def get_token(recaptcha_site_key: str, browser: WebDriver) -> typing.Tuple:
return token, action


def assess_token(recaptcha_site_key: str, token: str, action: str) -> None:
create_assessment.create_assessment(
def assess_token(recaptcha_site_key: str, token: str, action: str) -> Assessment:
return create_assessment(
project_id=GOOGLE_CLOUD_PROJECT,
recaptcha_site_key=recaptcha_site_key,
token=token,