Skip to content

Commit

Permalink
chore: Re-generated to pick up changes from googleapis. (#126)
Browse files Browse the repository at this point in the history
* changes without context

        autosynth cannot find the source of changes triggered by earlier changes in this
        repository, or by version upgrades to tools such as linters.

* feat: add the Tailing API to get a live stream of the tail end of filtered logs

PiperOrigin-RevId: 344435830

Source-Author: Google APIs <noreply@google.com>
Source-Date: Thu Nov 26 09:56:05 2020 -0800
Source-Repo: googleapis/googleapis
Source-Sha: e8857c4c36948e7e0500377cd7fcecbf2459afc8
Source-Link: googleapis/googleapis@e8857c4
  • Loading branch information
yoshi-automation committed Dec 12, 2020
1 parent ca3d751 commit 3a25c8c
Show file tree
Hide file tree
Showing 10 changed files with 503 additions and 4 deletions.
99 changes: 99 additions & 0 deletions google/cloud/logging_v2/proto/logging.proto
Expand Up @@ -125,6 +125,15 @@ service LoggingServiceV2 {
};
option (google.api.method_signature) = "parent";
}

// Streaming read of log entries as they are ingested. Until the stream is
// terminated, it will continue reading logs.
//
// This is a bidirectional-streaming RPC: the client streams
// `TailLogEntriesRequest` messages (initial request plus any updates) and
// the server streams back `TailLogEntriesResponse` messages until the
// session is terminated.
rpc TailLogEntries(stream TailLogEntriesRequest) returns (stream TailLogEntriesResponse) {
  // HTTP/JSON binding: POST /v2/entries:tail with the request in the body.
  option (google.api.http) = {
    post: "/v2/entries:tail"
    body: "*"
  };
}
}

// The parameters to DeleteLog.
Expand Down Expand Up @@ -254,6 +263,11 @@ message ListLogEntriesRequest {
// "billingAccounts/[BILLING_ACCOUNT_ID]"
// "folders/[FOLDER_ID]"
//
// May alternatively be one or more views:
// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
//
// Projects listed in the `project_ids` field are added to this list.
repeated string resource_names = 8 [
Expand Down Expand Up @@ -363,6 +377,19 @@ message ListLogsRequest {
// `nextPageToken` from the previous response. The values of other method
// parameters should be identical to those in the previous call.
string page_token = 3 [(google.api.field_behavior) = OPTIONAL];

// Optional. The resource name that owns the logs:
// projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
// folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
//
// To support legacy queries, it could also be:
// "projects/[PROJECT_ID]"
// "organizations/[ORGANIZATION_ID]"
// "billingAccounts/[BILLING_ACCOUNT_ID]"
// "folders/[FOLDER_ID]"
repeated string resource_names = 8 [(google.api.field_behavior) = OPTIONAL];
}

// Result returned from ListLogs.
Expand All @@ -377,3 +404,75 @@ message ListLogsResponse {
// method again using the value of `nextPageToken` as `pageToken`.
string next_page_token = 2;
}

// The parameters to `TailLogEntries`.
message TailLogEntriesRequest {
  // Required. Name of a parent resource from which to retrieve log entries:
  //
  //     "projects/[PROJECT_ID]"
  //     "organizations/[ORGANIZATION_ID]"
  //     "billingAccounts/[BILLING_ACCOUNT_ID]"
  //     "folders/[FOLDER_ID]"
  //
  // May alternatively be one or more views:
  //     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
  //     "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
  //     "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
  //     "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
  repeated string resource_names = 1 [(google.api.field_behavior) = REQUIRED];

  // Optional. A filter that chooses which log entries to return. See [Advanced
  // Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters).
  // Only log entries that match the filter are returned. An empty filter
  // matches all log entries in the resources listed in `resource_names`.
  // Referencing a parent resource that is not in `resource_names` will cause
  // the filter to return no results. The maximum length of the filter is 20000
  // characters.
  string filter = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The amount of time to buffer log entries at the server before
  // being returned, to prevent out-of-order results due to late-arriving log
  // entries. Valid values are between 0-60000 milliseconds. Defaults to 2000
  // milliseconds.
  google.protobuf.Duration buffer_window = 3 [(google.api.field_behavior) = OPTIONAL];
}

// Result returned from `TailLogEntries`.
message TailLogEntriesResponse {
  // Information about entries that were omitted from the session.
  message SuppressionInfo {
    // An indicator of why entries were omitted.
    enum Reason {
      // Unexpected default. Never carries a real suppression reason.
      REASON_UNSPECIFIED = 0;

      // Indicates suppression occurred due to relevant entries being
      // received in excess of rate limits. For quotas and limits, see
      // [Logging API quotas and
      // limits](https://cloud.google.com/logging/quotas#api-limits).
      RATE_LIMIT = 1;

      // Indicates suppression occurred due to the client not consuming
      // responses quickly enough.
      NOT_CONSUMED = 2;
    }

    // The reason that entries were omitted from the session.
    Reason reason = 1;

    // A lower bound on the count of entries omitted due to `reason`.
    // The true number of omitted entries may be higher.
    int32 suppressed_count = 2;
  }

  // A list of log entries. Each response in the stream will order entries with
  // increasing values of `LogEntry.timestamp`. Ordering is not guaranteed
  // between separate responses.
  repeated LogEntry entries = 1;

  // If entries that otherwise would have been included in the session were not
  // sent back to the client, counts of relevant entries omitted from the
  // session with the reason that they were not included. There will be at most
  // one of each reason per response. The counts represent the number of
  // suppressed entries since the last streamed response.
  repeated SuppressionInfo suppression_info = 2;
}
Expand Up @@ -18,7 +18,16 @@
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
from typing import (
Dict,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources

import google.api_core.client_options as ClientOptions # type: ignore
Expand Down Expand Up @@ -430,6 +439,12 @@ async def list_log_entries(
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
Projects listed in the ``project_ids`` field are added
to this list.
This corresponds to the ``resource_names`` field
Expand Down Expand Up @@ -690,6 +705,56 @@ async def list_logs(
# Done; return the response.
return response

def tail_log_entries(
    self,
    requests: AsyncIterator[logging.TailLogEntriesRequest] = None,
    *,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[logging.TailLogEntriesResponse]]:
    r"""Streaming read of log entries as they are ingested.
    Until the stream is terminated, it will continue reading
    logs.
    Args:
        requests (AsyncIterator[`~.logging.TailLogEntriesRequest`]):
            The request object AsyncIterator. The parameters to `TailLogEntries`.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    Returns:
        AsyncIterable[~.logging.TailLogEntriesResponse]:
            Result returned from ``TailLogEntries``.
    """

    # Errors considered transient for this streaming call; these are
    # retried with exponential backoff by the wrapper below.
    transient_errors = retries.if_exception_type(
        exceptions.DeadlineExceeded,
        exceptions.InternalServerError,
        exceptions.ServiceUnavailable,
    )

    # Attach default retry/timeout configuration and friendly error
    # handling to the raw transport method.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.tail_log_entries,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=transient_errors,
        ),
        default_timeout=3600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Issue the bidirectional-streaming call and hand the awaitable
    # response stream back to the caller.
    return wrapped_rpc(requests, retry=retry, timeout=timeout, metadata=metadata)


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
54 changes: 53 additions & 1 deletion google/cloud/logging_v2/services/logging_service_v2/client.py
Expand Up @@ -19,7 +19,17 @@
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from typing import (
Callable,
Dict,
Optional,
Iterable,
Iterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources

from google.api_core import client_options as client_options_lib # type: ignore
Expand Down Expand Up @@ -598,6 +608,12 @@ def list_log_entries(
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
May alternatively be one or more views
projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]
Projects listed in the ``project_ids`` field are added
to this list.
This corresponds to the ``resource_names`` field
Expand Down Expand Up @@ -833,6 +849,42 @@ def list_logs(
# Done; return the response.
return response

def tail_log_entries(
    self,
    requests: Iterator[logging.TailLogEntriesRequest] = None,
    *,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[logging.TailLogEntriesResponse]:
    r"""Streaming read of log entries as they are ingested.
    Until the stream is terminated, it will continue reading
    logs.
    Args:
        requests (Iterator[`~.logging.TailLogEntriesRequest`]):
            The request object iterator. The parameters to `TailLogEntries`.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    Returns:
        Iterable[~.logging.TailLogEntriesResponse]:
            Result returned from ``TailLogEntries``.
    """

    # The transport pre-wraps each method with its default retry and
    # timeout configuration during initialization; look up the wrapped
    # callable rather than re-wrapping here.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.tail_log_entries]

    # Issue the streaming call and return the response iterable.
    return wrapped_rpc(requests, retry=retry, timeout=timeout, metadata=metadata)


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
Expand Up @@ -186,6 +186,21 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=60.0,
client_info=client_info,
),
self.tail_log_entries: gapic_v1.method.wrap_method(
self.tail_log_entries,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.InternalServerError,
exceptions.ServiceUnavailable,
),
),
default_timeout=3600.0,
client_info=client_info,
),
}

@property
Expand Down Expand Up @@ -244,5 +259,17 @@ def list_logs(
]:
raise NotImplementedError()

@property
def tail_log_entries(
    self,
) -> typing.Callable[
    [logging.TailLogEntriesRequest],
    typing.Union[
        logging.TailLogEntriesResponse,
        typing.Awaitable[logging.TailLogEntriesResponse],
    ],
]:
    # Abstract hook for the TailLogEntries streaming RPC. Concrete
    # transports (gRPC sync/asyncio) override this property with a
    # callable bound to the channel; the base class only declares it.
    raise NotImplementedError()


__all__ = ("LoggingServiceV2Transport",)
Expand Up @@ -380,5 +380,33 @@ def list_logs(
)
return self._stubs["list_logs"]

@property
def tail_log_entries(
    self,
) -> Callable[[logging.TailLogEntriesRequest], logging.TailLogEntriesResponse]:
    r"""Return a callable for the tail log entries method over gRPC.
    Streaming read of log entries as they are ingested.
    Until the stream is terminated, it will continue reading
    logs.
    Returns:
        Callable[[~.TailLogEntriesRequest],
            ~.TailLogEntriesResponse]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Stubs are created lazily and memoized so repeated property reads
    # reuse the same gRPC stub. gRPC handles serialization and
    # deserialization, so only the message (de)serializers are supplied.
    stub = self._stubs.get("tail_log_entries")
    if stub is None:
        stub = self.grpc_channel.stream_stream(
            "/google.logging.v2.LoggingServiceV2/TailLogEntries",
            request_serializer=logging.TailLogEntriesRequest.serialize,
            response_deserializer=logging.TailLogEntriesResponse.deserialize,
        )
        self._stubs["tail_log_entries"] = stub
    return stub


__all__ = ("LoggingServiceV2GrpcTransport",)
Expand Up @@ -390,5 +390,35 @@ def list_logs(
)
return self._stubs["list_logs"]

@property
def tail_log_entries(
    self,
) -> Callable[
    [logging.TailLogEntriesRequest], Awaitable[logging.TailLogEntriesResponse]
]:
    r"""Return a callable for the tail log entries method over gRPC.
    Streaming read of log entries as they are ingested.
    Until the stream is terminated, it will continue reading
    logs.
    Returns:
        Callable[[~.TailLogEntriesRequest],
            Awaitable[~.TailLogEntriesResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Stubs are created lazily and memoized so repeated property reads
    # reuse the same gRPC stub. gRPC handles serialization and
    # deserialization, so only the message (de)serializers are supplied.
    stub = self._stubs.get("tail_log_entries")
    if stub is None:
        stub = self.grpc_channel.stream_stream(
            "/google.logging.v2.LoggingServiceV2/TailLogEntries",
            request_serializer=logging.TailLogEntriesRequest.serialize,
            response_deserializer=logging.TailLogEntriesResponse.deserialize,
        )
        self._stubs["tail_log_entries"] = stub
    return stub


__all__ = ("LoggingServiceV2GrpcAsyncIOTransport",)

0 comments on commit 3a25c8c

Please sign in to comment.